diff --git a/utils/download.go b/utils/download.go
index 14dd6fd..e7c8179 100644
--- a/utils/download.go
+++ b/utils/download.go
@@ -10,11 +10,11 @@ import (
 	"strings"

 	log "github.com/platform9/nodeadm/pkg/logrus"
-
-	"github.com/docker/docker/api/types"
-	"github.com/docker/docker/api/types/filters"
 	"github.com/docker/docker/client"
 	"github.com/platform9/nodeadm/constants"
+	"github.com/containers/image/copy"
+	"github.com/containers/image/signature"
+	"github.com/containers/image/transports/alltransports"
 )

 type Artifact struct {
@@ -91,34 +91,35 @@ func PopulateCache() {
 	}
 	loadAvailableImages(cli)
 	for _, image := range GetImages() {
-		//first check if image is already in docker cache
-		nameFilter := filters.NewArgs()
-		nameFilter.Add("reference", image)
-		log.Infof("Checking if image %s is available in docker cache", image)
-		list, err := cli.ImageList(context.Background(), types.ImageListOptions{
-			Filters: nameFilter,
-		})
-		if err != nil {
-			log.Fatalf("\nFailed to list images with error %v", err)
-		}
-		if len(list) == 0 {
-			log.Infof("Trying to pull image %s", image)
-			cmd := exec.Command("docker", "pull", image)
-			err = cmd.Run()
-			if err != nil {
-				log.Fatalf("failed to run %q: %s", strings.Join(cmd.Args, " "), err)
-			}
-
-		}
-		list, err = cli.ImageList(context.Background(), types.ImageListOptions{
-			Filters: nameFilter,
-		})
-		imageFile := filepath.Join(constants.ImagesCacheDir, strings.Replace(list[0].ID, "sha256:", "", -1)+".tar")
-		cmd := exec.Command("docker", "save", image, "-o", imageFile)
-		err = cmd.Run()
-		if err != nil {
-			log.Fatalf("failed to run %q: %s", strings.Join(cmd.Args, " "), err)
-		}
+		log.Infof("Processing image %s", image)
+		// Use the last path component of the image name (minus the tag) as the archive name.
+		nameParts := strings.Split(image, "/")
+		imageBaseName := strings.Split(nameParts[len(nameParts)-1], ":")[0]
+		log.Infof("Image base name: %s", imageBaseName)
+		policy, err := signature.DefaultPolicy(nil)
+		if err != nil {
+			log.Fatalf("Failed to read default signature policy: %v", err)
+		}
+		policyContext, err := signature.NewPolicyContext(policy)
+		if err != nil {
+			log.Fatalf("Failed to create policy context: %v", err)
+		}
+		srcImage := "docker://" + image
+		srcRef, err := alltransports.ParseImageName(srcImage)
+		if err != nil {
+			log.Fatalf("Invalid source name %s: %v", srcImage, err)
+		}
+		destImage := "docker-archive://" + constants.ImagesCacheDir + "/" + imageBaseName + ".tar"
+		destRef, err := alltransports.ParseImageName(destImage)
+		if err != nil {
+			log.Fatalf("Invalid destination name %s: %v", destImage, err)
+		}
+		_, err = copy.Image(context.Background(), policyContext, destRef, srcRef, &copy.Options{})
+		// Destroy the policy context on every iteration; a defer here would pile contexts up until PopulateCache returns.
+		policyContext.Destroy()
+		if err != nil {
+			log.Fatalf("failed to copy %s: %s", image, err)
+		}
 	}
 	for _, file := range NodeArtifact {
 		mode := constants.Read
diff --git a/vendor/github.com/containerd/continuity/LICENSE b/vendor/github.com/containerd/continuity/LICENSE
new file mode 100644
index 0000000..8f71f43
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/LICENSE
@@ -0,0 +1,202 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity.
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
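The `PopulateCache` rewrite at the top of this patch copies each image straight from its registry into a `docker-archive` tarball through the vendored `containers/image` library, with no docker daemon in the path. A minimal standalone sketch of the same pattern, assuming a placeholder image name and output path (the five-argument `copy.Image` call mirrors the pre-v5 `containers/image` API used above):

```go
package main

import (
	"context"
	"log"

	"github.com/containers/image/copy"
	"github.com/containers/image/signature"
	"github.com/containers/image/transports/alltransports"
)

func main() {
	// DefaultPolicy reads the host's policy.json; the resulting policy
	// context decides whether the source image is acceptable.
	policy, err := signature.DefaultPolicy(nil)
	if err != nil {
		log.Fatalf("reading signature policy: %v", err)
	}
	policyContext, err := signature.NewPolicyContext(policy)
	if err != nil {
		log.Fatalf("creating policy context: %v", err)
	}
	defer policyContext.Destroy()

	// "docker://" names a registry image; "docker-archive:" names a
	// local tarball that `docker load` understands.
	srcRef, err := alltransports.ParseImageName("docker://k8s.gcr.io/pause:3.1")
	if err != nil {
		log.Fatalf("parsing source: %v", err)
	}
	destRef, err := alltransports.ParseImageName("docker-archive:/tmp/pause.tar")
	if err != nil {
		log.Fatalf("parsing destination: %v", err)
	}
	if _, err := copy.Image(context.Background(), policyContext, destRef, srcRef, &copy.Options{}); err != nil {
		log.Fatalf("copying image: %v", err)
	}
}
```

A node can later side-load the result without touching the network, e.g. `docker load -i /tmp/pause.tar`.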
+
diff --git a/vendor/github.com/containerd/continuity/README.md b/vendor/github.com/containerd/continuity/README.md
new file mode 100644
index 0000000..0e91ce0
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/README.md
@@ -0,0 +1,74 @@
+# continuity
+
+[![GoDoc](https://godoc.org/github.com/containerd/continuity?status.svg)](https://godoc.org/github.com/containerd/continuity)
+[![Build Status](https://travis-ci.org/containerd/continuity.svg?branch=master)](https://travis-ci.org/containerd/continuity)
+
+A transport-agnostic, filesystem metadata manifest system
+
+This project is a staging area for experiments in providing transport-agnostic
+metadata storage.
+
+Please see https://github.com/opencontainers/specs/issues/11 for more details.
+
+## Manifest Format
+
+A continuity manifest encodes filesystem metadata in Protocol Buffers.
+Please refer to [proto/manifest.proto](proto/manifest.proto).
+
+## Usage
+
+Build:
+
+```console
+$ make
+```
+
+Create a manifest (of this repo itself):
+
+```console
+$ ./bin/continuity build . > /tmp/a.pb
+```
+
+Dump a manifest:
+
+```console
+$ ./bin/continuity ls /tmp/a.pb
+...
+-rw-rw-r--      270 B   /.gitignore
+-rw-rw-r--       88 B   /.mailmap
+-rw-rw-r--      187 B   /.travis.yml
+-rw-rw-r--      359 B   /AUTHORS
+-rw-rw-r--       11 kB  /LICENSE
+-rw-rw-r--      1.5 kB  /Makefile
+...
+-rw-rw-r--      986 B   /testutil_test.go
+drwxrwxr-x        0 B   /version
+-rw-rw-r--      478 B   /version/version.go
+```
+
+Verify a manifest:
+
+```console
+$ ./bin/continuity verify . /tmp/a.pb
+```
+
+Break the directory and restore using the manifest:
+```console
+$ chmod 777 Makefile
+$ ./bin/continuity verify . /tmp/a.pb
+2017/06/23 08:00:34 error verifying manifest: resource "/Makefile" has incorrect mode: -rwxrwxrwx != -rw-rw-r--
+$ ./bin/continuity apply . /tmp/a.pb
+$ stat -c %a Makefile
+664
+$ ./bin/continuity verify . /tmp/a.pb
+```
+
+
+## Contribution Guide
+### Building Proto Package
+
+If you change the proto file, you will need to rebuild the generated Go code with `go generate`.
+
+```console
+$ go generate ./proto
+```
diff --git a/vendor/github.com/containerd/continuity/pathdriver/path_driver.go b/vendor/github.com/containerd/continuity/pathdriver/path_driver.go
new file mode 100644
index 0000000..b43d55f
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/pathdriver/path_driver.go
@@ -0,0 +1,85 @@
+package pathdriver
+
+import (
+	"path/filepath"
+)
+
+// PathDriver provides all of the path manipulation functions in a common
+// interface. The context should call these and never use the `filepath`
+// package or any other package to manipulate paths.
+type PathDriver interface {
+	Join(paths ...string) string
+	IsAbs(path string) bool
+	Rel(base, target string) (string, error)
+	Base(path string) string
+	Dir(path string) string
+	Clean(path string) string
+	Split(path string) (dir, file string)
+	Separator() byte
+	Abs(path string) (string, error)
+	Walk(string, filepath.WalkFunc) error
+	FromSlash(path string) string
+	ToSlash(path string) string
+	Match(pattern, name string) (matched bool, err error)
+}
+
+// pathDriver is a simple default implementation that calls the filepath
+// package.
+type pathDriver struct{}
+
+// LocalPathDriver is the exported pathDriver struct for convenience.
+var LocalPathDriver PathDriver = &pathDriver{}
+
+func (*pathDriver) Join(paths ...string) string {
+	return filepath.Join(paths...)
+}
+
+func (*pathDriver) IsAbs(path string) bool {
+	return filepath.IsAbs(path)
+}
+
+func (*pathDriver) Rel(base, target string) (string, error) {
+	return filepath.Rel(base, target)
+}
+
+func (*pathDriver) Base(path string) string {
+	return filepath.Base(path)
+}
+
+func (*pathDriver) Dir(path string) string {
+	return filepath.Dir(path)
+}
+
+func (*pathDriver) Clean(path string) string {
+	return filepath.Clean(path)
+}
+
+func (*pathDriver) Split(path string) (dir, file string) {
+	return filepath.Split(path)
+}
+
+func (*pathDriver) Separator() byte {
+	return filepath.Separator
+}
+
+func (*pathDriver) Abs(path string) (string, error) {
+	return filepath.Abs(path)
+}
+
+// Note that filepath.Walk calls os.Stat, so if the context wants to call
+// Driver.Stat() for Walk, it needs to create a new struct that overrides
+// this method.
+func (*pathDriver) Walk(root string, walkFn filepath.WalkFunc) error {
+	return filepath.Walk(root, walkFn)
+}
+
+func (*pathDriver) FromSlash(path string) string {
+	return filepath.FromSlash(path)
+}
+
+func (*pathDriver) ToSlash(path string) string {
+	return filepath.ToSlash(path)
+}
+
+func (*pathDriver) Match(pattern, name string) (bool, error) {
+	return filepath.Match(pattern, name)
+}
diff --git a/vendor/github.com/containerd/continuity/vendor.conf b/vendor/github.com/containerd/continuity/vendor.conf
new file mode 100644
index 0000000..7c80dee
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/vendor.conf
@@ -0,0 +1,13 @@
+bazil.org/fuse 371fbbdaa8987b715bdd21d6adc4c9b20155f748
+github.com/dustin/go-humanize bb3d318650d48840a39aa21a027c6630e198e626
+github.com/golang/protobuf 1e59b77b52bf8e4b449a57e6f79f21226d571845
+github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
+github.com/opencontainers/go-digest 279bed98673dd5bef374d3b6e4b09e2af76183bf
+github.com/pkg/errors f15c970de5b76fac0b59abb32d62c17cc7bed265
+github.com/sirupsen/logrus 89742aefa4b206dcf400792f3bd35b542998eb3b
+github.com/spf13/cobra 2da4a54c5ceefcee7ca5dd0eea1e18a3b6366489
+github.com/spf13/pflag 4c012f6dcd9546820e378d0bdda4d8fc772cdfea
+golang.org/x/crypto 9f005a07e0d31d45e6656d241bb5c0f2efd4bc94
+golang.org/x/net a337091b0525af65de94df2eb7e98bd9962dcbe2
+golang.org/x/sync 450f422ab23cf9881c94e2db30cac0eb1b7cf80c
+golang.org/x/sys 665f6529cca930e27b831a0d1dafffbe1c172924
diff --git a/vendor/github.com/containers/buildah/LICENSE b/vendor/github.com/containers/buildah/LICENSE
new file mode 100644
index 0000000..8dada3e
--- /dev/null
+++ b/vendor/github.com/containers/buildah/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
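The `pathdriver` package vendored earlier routes every path operation through an interface instead of hard-wired `filepath` calls, so a context that models another filesystem can substitute its own path semantics. A short sketch of how a consumer might lean on that; `resolveInRoot` is a hypothetical helper, not part of the package:

```go
package main

import (
	"fmt"

	"github.com/containerd/continuity/pathdriver"
)

// resolveInRoot joins a client-supplied path onto a root directory using
// whatever PathDriver the caller provides, so the same logic can serve
// drivers with different separators or cleaning rules.
func resolveInRoot(d pathdriver.PathDriver, root, p string) string {
	// Cleaning the path against the separator first keeps ".." from
	// escaping the root.
	return d.Join(root, d.Clean(string(d.Separator())+p))
}

func main() {
	// On Linux this prints /var/lib/data/etc/passwd.
	fmt.Println(resolveInRoot(pathdriver.LocalPathDriver, "/var/lib/data", "../etc/passwd"))
}
```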
diff --git a/vendor/github.com/containers/buildah/README.md b/vendor/github.com/containers/buildah/README.md
new file mode 100644
index 0000000..827d5a8
--- /dev/null
+++ b/vendor/github.com/containers/buildah/README.md
@@ -0,0 +1,129 @@
+![buildah logo](https://cdn.rawgit.com/containers/buildah/master/logos/buildah-logo_large.png)
+
+# [Buildah](https://www.youtube.com/embed/YVk5NgSiUw8) - a tool that facilitates building [Open Container Initiative (OCI)](https://www.opencontainers.org/) container images
+
+[![Go Report Card](https://goreportcard.com/badge/github.com/containers/buildah)](https://goreportcard.com/report/github.com/containers/buildah)
+[![Travis](https://travis-ci.org/containers/buildah.svg?branch=master)](https://travis-ci.org/containers/buildah)
+
+The Buildah package provides a command line tool that can be used to
+* create a working container, either from scratch or using an image as a starting point
+* create an image, either from a working container or via the instructions in a Dockerfile
+* build images in either the OCI image format or the traditional upstream docker image format
+* mount a working container's root filesystem for manipulation
+* unmount a working container's root filesystem
+* use the updated contents of a container's root filesystem as a filesystem layer to create a new image
+* delete a working container or an image
+* rename a local container
+
+## Buildah Information for Developers
+
+For blogs, release announcements and more, please check out the [buildah.io](https://buildah.io) website!
+
+**[Buildah Demos](demos)**
+
+**[Changelog](CHANGELOG.md)**
+
+**[Contributing](CONTRIBUTING.md)**
+
+**[Development Plan](developmentplan.md)**
+
+**[Installation notes](install.md)**
+
+**[Troubleshooting Guide](troubleshooting.md)**
+
+**[Tutorials](docs/tutorials)**
+
+## Buildah and Podman relationship
+
+Buildah and Podman are two complementary open-source projects that are
+available on most Linux platforms; both projects reside on
+[GitHub.com](https://github.com), with Buildah
+[here](https://github.com/containers/buildah) and Podman
+[here](https://github.com/containers/libpod). Both Buildah and Podman are
+command line tools that work on Open Container Initiative (OCI) images and
+containers. The two projects differ in their specialization.
+
+Buildah specializes in building OCI images. Buildah's commands replicate all
+of the commands that are found in a Dockerfile. This allows building images
+with and without Dockerfiles while not requiring any root privileges.
+Buildah's ultimate goal is to provide a lower-level coreutils interface to
+build images. The flexibility of building images without Dockerfiles allows
+for the integration of other scripting languages into the build process.
+Buildah follows a simple fork-exec model and does not run as a daemon,
+but it is based on a comprehensive API in golang, which can be vendored
+into other tools.
+
+Podman specializes in all of the commands and functions that help you to
+maintain and modify OCI images, such as pulling and tagging. It also allows
+you to create, run, and maintain containers created from those images.
+
+A major difference between Podman and Buildah is their concept of a container.
+Podman allows users to create "traditional containers" that are intended to be
+long-lived, while Buildah containers are really just created to allow content
+to be added back to the container image.
+An easy way to think of it is that the `buildah run` command emulates the
+RUN instruction in a Dockerfile, while the `podman run` command emulates the
+`docker run` command. Because of this and their underlying storage
+differences, you cannot see Podman containers from within Buildah or vice
+versa.
+
+In short, Buildah is an efficient way to create OCI images, while Podman
+allows you to manage and maintain those images and containers in a
+production environment using familiar container CLI commands. For more
+details, see the
+[Container Tools Guide](https://github.com/containers/buildah/tree/master/docs/containertools).
+
+## Example
+
+From [`./examples/lighttpd.sh`](examples/lighttpd.sh):
+
+```bash
+$ cat > lighttpd.sh <<"EOF"
+#!/bin/bash -x
+
+ctr1=$(buildah from "${1:-fedora}")
+
+## Get all updates and install our minimal httpd server
+buildah run "$ctr1" -- dnf update -y
+buildah run "$ctr1" -- dnf install -y lighttpd
+
+## Include some buildtime annotations
+buildah config --annotation "com.example.build.host=$(uname -n)" "$ctr1"
+
+## Run our server and expose the port
+buildah config --cmd "/usr/sbin/lighttpd -D -f /etc/lighttpd/lighttpd.conf" "$ctr1"
+buildah config --port 80 "$ctr1"
+
+## Commit this container to an image name
+buildah commit "$ctr1" "${2:-$USER/lighttpd}"
+EOF
+
+$ chmod +x lighttpd.sh
+$ sudo ./lighttpd.sh
+```
+
+## Commands
+| Command | Description |
+| ------------------------------------------------------ | ------------------------------------------------------------------------------------------------- |
+| [buildah-add(1)](/docs/buildah-add.md) | Add the contents of a file, URL, or a directory to the container. |
+| [buildah-bud(1)](/docs/buildah-bud.md) | Build an image using instructions from Dockerfiles. |
+| [buildah-commit(1)](/docs/buildah-commit.md) | Create an image from a working container. |
+| [buildah-config(1)](/docs/buildah-config.md) | Update image configuration settings. |
+| [buildah-containers(1)](/docs/buildah-containers.md) | List the working containers and their base images. |
+| [buildah-copy(1)](/docs/buildah-copy.md) | Copy the contents of a file, URL, or directory into a container's working directory. |
+| [buildah-from(1)](/docs/buildah-from.md) | Create a new working container, either from scratch or using a specified image as a starting point. |
+| [buildah-images(1)](/docs/buildah-images.md) | List images in local storage. |
+| [buildah-info(1)](/docs/buildah-info.md) | Display Buildah system information. |
+| [buildah-inspect(1)](/docs/buildah-inspect.md) | Inspect the configuration of a container or image. |
+| [buildah-mount(1)](/docs/buildah-mount.md) | Mount the working container's root filesystem. |
+| [buildah-pull(1)](/docs/buildah-pull.md) | Pull an image from the specified location. |
+| [buildah-push(1)](/docs/buildah-push.md) | Push an image from local storage to elsewhere. |
+| [buildah-rename(1)](/docs/buildah-rename.md) | Rename a local container. |
+| [buildah-rm(1)](/docs/buildah-rm.md) | Remove one or more working containers. |
+| [buildah-rmi(1)](/docs/buildah-rmi.md) | Remove one or more images. |
+| [buildah-run(1)](/docs/buildah-run.md) | Run a command inside of the container. |
+| [buildah-tag(1)](/docs/buildah-tag.md) | Add an additional name to a local image. |
+| [buildah-umount(1)](/docs/buildah-umount.md) | Unmount a working container's root file system. |
+| [buildah-unshare(1)](/docs/buildah-unshare.md) | Launch a command in a user namespace with modified ID mappings. |
+| [buildah-version(1)](/docs/buildah-version.md) | Display the Buildah version information. |
+
+**Future goals include:**
+* more CI tests
+* additional CLI commands (?)
diff --git a/vendor/github.com/containers/buildah/pkg/unshare/unshare.c b/vendor/github.com/containers/buildah/pkg/unshare/unshare.c
new file mode 100644
index 0000000..fd0d48d
--- /dev/null
+++ b/vendor/github.com/containers/buildah/pkg/unshare/unshare.c
@@ -0,0 +1,287 @@
+#define _GNU_SOURCE
+#include <sys/types.h>
+#include <sys/ioctl.h>
+#include <sys/stat.h>
+#include <sys/syscall.h>
+#include <sys/mman.h>
+#include <fcntl.h>
+#include <grp.h>
+#include <sched.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <termios.h>
+#include <errno.h>
+#include <unistd.h>
+
+/* Open-source projects like conda-forge want to package podman and are based
+   off of centos:6. conda-forge has minimal libc requirements and lacks the
+   memfd.h header, so we use mman.h instead.
+*/
+#ifndef MFD_ALLOW_SEALING
+#define MFD_ALLOW_SEALING 2U
+#endif
+#ifndef MFD_CLOEXEC
+#define MFD_CLOEXEC 1U
+#endif
+
+#ifndef F_LINUX_SPECIFIC_BASE
+#define F_LINUX_SPECIFIC_BASE 1024
+#endif
+#ifndef F_ADD_SEALS
+#define F_ADD_SEALS (F_LINUX_SPECIFIC_BASE + 9)
+#define F_GET_SEALS (F_LINUX_SPECIFIC_BASE + 10)
+#endif
+#ifndef F_SEAL_SEAL
+#define F_SEAL_SEAL 0x0001LU
+#endif
+#ifndef F_SEAL_SHRINK
+#define F_SEAL_SHRINK 0x0002LU
+#endif
+#ifndef F_SEAL_GROW
+#define F_SEAL_GROW 0x0004LU
+#endif
+#ifndef F_SEAL_WRITE
+#define F_SEAL_WRITE 0x0008LU
+#endif
+
+#define BUFSTEP 1024
+
+static const char *_max_user_namespaces = "/proc/sys/user/max_user_namespaces";
+static const char *_unprivileged_user_namespaces = "/proc/sys/kernel/unprivileged_userns_clone";
+
+static int _containers_unshare_parse_envint(const char *envname) {
+	char *p, *q;
+	long l;
+
+	p = getenv(envname);
+	if (p == NULL) {
+		return -1;
+	}
+	q = NULL;
+	l = strtol(p, &q, 10);
+	if ((q == NULL) || (*q != '\0')) {
+		fprintf(stderr, "Error parsing \"%s\"=\"%s\"!\n", envname, p);
+		_exit(1);
+	}
+	unsetenv(envname);
+	return l;
+}
+
+static void _check_proc_sys_file(const char *path)
+{
+	FILE *fp;
+	char buf[32];
+	size_t n_read;
+	long r;
+
+	fp = fopen(path, "r");
+	if (fp == NULL) {
+		if (errno != ENOENT)
+			fprintf(stderr, "Error reading %s: %m\n", path);
+	} else {
+		memset(buf, 0, sizeof(buf));
+		n_read = fread(buf, 1, sizeof(buf) - 1, fp);
+		if (n_read > 0) {
+			r = atoi(buf);
+			if (r == 0) {
+				fprintf(stderr, "User namespaces are not enabled in %s.\n", path);
+			}
+		} else {
+			fprintf(stderr, "Error reading %s: no contents, should contain a number greater than 0.\n", path);
+		}
+		fclose(fp);
+	}
+}
+
+static char **parse_proc_stringlist(const char *list) {
+	int fd, n, i, n_strings;
+	char *buf, *new_buf, **ret;
+	size_t size, new_size, used;
+
+	fd = open(list, O_RDONLY);
+	if (fd == -1) {
+		return NULL;
+	}
+	buf = NULL;
+	size = 0;
+	used = 0;
+	for (;;) {
+		new_size = used + BUFSTEP;
+		new_buf = realloc(buf, new_size);
+		if (new_buf == NULL) {
+			free(buf);
+			fprintf(stderr, "realloc(%ld): out of memory\n", (long)(size + BUFSTEP));
+			return NULL;
+		}
+		buf = new_buf;
+		size = new_size;
+		memset(buf + used, '\0', size - used);
+		n = read(fd, buf + used, size - used - 1);
+		if (n < 0) {
+			fprintf(stderr, "read(): %m\n");
+			return NULL;
+		}
+		if (n == 0) {
+			break;
+		}
+		used += n;
+	}
+	close(fd);
+	n_strings = 0;
+	for (n = 0; n < used; n++) {
+		if ((n == 0) || (buf[n-1] == '\0')) {
+			n_strings++;
+		}
+	}
+	ret = calloc(n_strings + 1, sizeof(char *));
+	if (ret == NULL) {
+		fprintf(stderr, "calloc(): out of memory\n");
+		return NULL;
+	}
+	i = 0;
+	for (n = 0; n < used; n++) {
+		if ((n == 0) || (buf[n-1]
== '\0')) { + ret[i++] = &buf[n]; + } + } + ret[i] = NULL; + return ret; +} + +static int containers_reexec(void) { + char **argv, *exename; + int fd, mmfd, n_read, n_written; + struct stat st; + char buf[2048]; + + argv = parse_proc_stringlist("/proc/self/cmdline"); + if (argv == NULL) { + return -1; + } + fd = open("/proc/self/exe", O_RDONLY | O_CLOEXEC); + if (fd == -1) { + fprintf(stderr, "open(\"/proc/self/exe\"): %m\n"); + return -1; + } + if (fstat(fd, &st) == -1) { + fprintf(stderr, "fstat(\"/proc/self/exe\"): %m\n"); + return -1; + } + exename = basename(argv[0]); + mmfd = syscall(SYS_memfd_create, exename, (long) MFD_ALLOW_SEALING | MFD_CLOEXEC); + if (mmfd == -1) { + fprintf(stderr, "memfd_create(): %m\n"); + return -1; + } + for (;;) { + n_read = read(fd, buf, sizeof(buf)); + if (n_read < 0) { + fprintf(stderr, "read(\"/proc/self/exe\"): %m\n"); + return -1; + } + if (n_read == 0) { + break; + } + n_written = write(mmfd, buf, n_read); + if (n_written < 0) { + fprintf(stderr, "write(anonfd): %m\n"); + return -1; + } + if (n_written != n_read) { + fprintf(stderr, "write(anonfd): short write (%d != %d)\n", n_written, n_read); + return -1; + } + } + close(fd); + if (fcntl(mmfd, F_ADD_SEALS, F_SEAL_SHRINK | F_SEAL_GROW | F_SEAL_WRITE | F_SEAL_SEAL) == -1) { + close(mmfd); + fprintf(stderr, "Error sealing memfd copy: %m\n"); + return -1; + } + if (fexecve(mmfd, argv, environ) == -1) { + close(mmfd); + fprintf(stderr, "Error during reexec(...): %m\n"); + return -1; + } + return 0; +} + +void _containers_unshare(void) +{ + int flags, pidfd, continuefd, n, pgrp, sid, ctty; + char buf[2048]; + + flags = _containers_unshare_parse_envint("_Containers-unshare"); + if (flags == -1) { + return; + } + if ((flags & CLONE_NEWUSER) != 0) { + if (unshare(CLONE_NEWUSER) == -1) { + fprintf(stderr, "Error during unshare(CLONE_NEWUSER): %m\n"); + _check_proc_sys_file (_max_user_namespaces); + _check_proc_sys_file (_unprivileged_user_namespaces); + _exit(1); + } + } + pidfd = _containers_unshare_parse_envint("_Containers-pid-pipe"); + if (pidfd != -1) { + snprintf(buf, sizeof(buf), "%llu", (unsigned long long) getpid()); + size_t size = write(pidfd, buf, strlen(buf)); + if (size != strlen(buf)) { + fprintf(stderr, "Error writing PID to pipe on fd %d: %m\n", pidfd); + _exit(1); + } + close(pidfd); + } + continuefd = _containers_unshare_parse_envint("_Containers-continue-pipe"); + if (continuefd != -1) { + n = read(continuefd, buf, sizeof(buf)); + if (n > 0) { + fprintf(stderr, "Error: %.*s\n", n, buf); + _exit(1); + } + close(continuefd); + } + sid = _containers_unshare_parse_envint("_Containers-setsid"); + if (sid == 1) { + if (setsid() == -1) { + fprintf(stderr, "Error during setsid: %m\n"); + _exit(1); + } + } + pgrp = _containers_unshare_parse_envint("_Containers-setpgrp"); + if (pgrp == 1) { + if (setpgrp() == -1) { + fprintf(stderr, "Error during setpgrp: %m\n"); + _exit(1); + } + } + ctty = _containers_unshare_parse_envint("_Containers-ctty"); + if (ctty != -1) { + if (ioctl(ctty, TIOCSCTTY, 0) == -1) { + fprintf(stderr, "Error while setting controlling terminal to %d: %m\n", ctty); + _exit(1); + } + } + if ((flags & CLONE_NEWUSER) != 0) { + if (setresgid(0, 0, 0) != 0) { + fprintf(stderr, "Error during setresgid(0): %m\n"); + _exit(1); + } + if (setresuid(0, 0, 0) != 0) { + fprintf(stderr, "Error during setresuid(0): %m\n"); + _exit(1); + } + } + if ((flags & ~CLONE_NEWUSER) != 0) { + if (unshare(flags & ~CLONE_NEWUSER) == -1) { + fprintf(stderr, "Error during unshare(...): %m\n"); + _exit(1); + 
} + } + if (containers_reexec() != 0) { + _exit(1); + } + return; +} diff --git a/vendor/github.com/containers/buildah/pkg/unshare/unshare.go b/vendor/github.com/containers/buildah/pkg/unshare/unshare.go new file mode 100644 index 0000000..21b102c --- /dev/null +++ b/vendor/github.com/containers/buildah/pkg/unshare/unshare.go @@ -0,0 +1,572 @@ +// +build linux + +package unshare + +import ( + "bufio" + "bytes" + "fmt" + "io" + "os" + "os/exec" + "os/user" + "runtime" + "strconv" + "strings" + "sync" + "syscall" + + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/reexec" + "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/syndtr/gocapability/capability" +) + +// Cmd wraps an exec.Cmd created by the reexec package in unshare(), and +// handles setting ID maps and other related settings by triggering +// initialization code in the child. +type Cmd struct { + *exec.Cmd + UnshareFlags int + UseNewuidmap bool + UidMappings []specs.LinuxIDMapping + UseNewgidmap bool + GidMappings []specs.LinuxIDMapping + GidMappingsEnableSetgroups bool + Setsid bool + Setpgrp bool + Ctty *os.File + OOMScoreAdj *int + Hook func(pid int) error +} + +// Command creates a new Cmd which can be customized. +func Command(args ...string) *Cmd { + cmd := reexec.Command(args...) + return &Cmd{ + Cmd: cmd, + } +} + +func (c *Cmd) Start() error { + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + // Set an environment variable to tell the child to synchronize its startup. + if c.Env == nil { + c.Env = os.Environ() + } + c.Env = append(c.Env, fmt.Sprintf("_Containers-unshare=%d", c.UnshareFlags)) + + // Please the libpod "rootless" package to find the expected env variables. + if os.Geteuid() != 0 { + c.Env = append(c.Env, "_CONTAINERS_USERNS_CONFIGURED=done") + c.Env = append(c.Env, fmt.Sprintf("_CONTAINERS_ROOTLESS_UID=%d", os.Geteuid())) + c.Env = append(c.Env, fmt.Sprintf("_CONTAINERS_ROOTLESS_GID=%d", os.Getegid())) + } + + // Create the pipe for reading the child's PID. + pidRead, pidWrite, err := os.Pipe() + if err != nil { + return errors.Wrapf(err, "error creating pid pipe") + } + c.Env = append(c.Env, fmt.Sprintf("_Containers-pid-pipe=%d", len(c.ExtraFiles)+3)) + c.ExtraFiles = append(c.ExtraFiles, pidWrite) + + // Create the pipe for letting the child know to proceed. + continueRead, continueWrite, err := os.Pipe() + if err != nil { + pidRead.Close() + pidWrite.Close() + return errors.Wrapf(err, "error creating pid pipe") + } + c.Env = append(c.Env, fmt.Sprintf("_Containers-continue-pipe=%d", len(c.ExtraFiles)+3)) + c.ExtraFiles = append(c.ExtraFiles, continueRead) + + // Pass along other instructions. + if c.Setsid { + c.Env = append(c.Env, "_Containers-setsid=1") + } + if c.Setpgrp { + c.Env = append(c.Env, "_Containers-setpgrp=1") + } + if c.Ctty != nil { + c.Env = append(c.Env, fmt.Sprintf("_Containers-ctty=%d", len(c.ExtraFiles)+3)) + c.ExtraFiles = append(c.ExtraFiles, c.Ctty) + } + + // Make sure we clean up our pipes. + defer func() { + if pidRead != nil { + pidRead.Close() + } + if pidWrite != nil { + pidWrite.Close() + } + if continueRead != nil { + continueRead.Close() + } + if continueWrite != nil { + continueWrite.Close() + } + }() + + // Start the new process. + err = c.Cmd.Start() + if err != nil { + return err + } + + // Close the ends of the pipes that the parent doesn't need. 
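+	// Handshake: the child's C constructor (_containers_unshare) writes its
+	// PID into the pid pipe and then blocks reading from the continue pipe.
+	// The parent reads the PID, writes the ID mappings, and signals success
+	// by closing its end of the continue pipe (EOF); any bytes written to it
+	// instead are treated by the child as an error message.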
+ continueRead.Close() + continueRead = nil + pidWrite.Close() + pidWrite = nil + + // Read the child's PID from the pipe. + pidString := "" + b := new(bytes.Buffer) + io.Copy(b, pidRead) + pidString = b.String() + pid, err := strconv.Atoi(pidString) + if err != nil { + fmt.Fprintf(continueWrite, "error parsing PID %q: %v", pidString, err) + return errors.Wrapf(err, "error parsing PID %q", pidString) + } + pidString = fmt.Sprintf("%d", pid) + + // If we created a new user namespace, set any specified mappings. + if c.UnshareFlags&syscall.CLONE_NEWUSER != 0 { + // Always set "setgroups". + setgroups, err := os.OpenFile(fmt.Sprintf("/proc/%s/setgroups", pidString), os.O_TRUNC|os.O_WRONLY, 0) + if err != nil { + fmt.Fprintf(continueWrite, "error opening setgroups: %v", err) + return errors.Wrapf(err, "error opening /proc/%s/setgroups", pidString) + } + defer setgroups.Close() + if c.GidMappingsEnableSetgroups { + if _, err := fmt.Fprintf(setgroups, "allow"); err != nil { + fmt.Fprintf(continueWrite, "error writing \"allow\" to setgroups: %v", err) + return errors.Wrapf(err, "error opening \"allow\" to /proc/%s/setgroups", pidString) + } + } else { + if _, err := fmt.Fprintf(setgroups, "deny"); err != nil { + fmt.Fprintf(continueWrite, "error writing \"deny\" to setgroups: %v", err) + return errors.Wrapf(err, "error writing \"deny\" to /proc/%s/setgroups", pidString) + } + } + + if len(c.UidMappings) == 0 || len(c.GidMappings) == 0 { + uidmap, gidmap, err := GetHostIDMappings("") + if err != nil { + fmt.Fprintf(continueWrite, "error reading ID mappings in parent: %v", err) + return errors.Wrapf(err, "error reading ID mappings in parent") + } + if len(c.UidMappings) == 0 { + c.UidMappings = uidmap + for i := range c.UidMappings { + c.UidMappings[i].HostID = c.UidMappings[i].ContainerID + } + } + if len(c.GidMappings) == 0 { + c.GidMappings = gidmap + for i := range c.GidMappings { + c.GidMappings[i].HostID = c.GidMappings[i].ContainerID + } + } + } + + if len(c.GidMappings) > 0 { + // Build the GID map, since writing to the proc file has to be done all at once. + g := new(bytes.Buffer) + for _, m := range c.GidMappings { + fmt.Fprintf(g, "%d %d %d\n", m.ContainerID, m.HostID, m.Size) + } + gidmapSet := false + // Set the GID map. + if c.UseNewgidmap { + cmd := exec.Command("newgidmap", append([]string{pidString}, strings.Fields(strings.Replace(g.String(), "\n", " ", -1))...)...) 
+ g.Reset() + cmd.Stdout = g + cmd.Stderr = g + err := cmd.Run() + if err == nil { + gidmapSet = true + } else { + logrus.Warnf("error running newgidmap: %v: %s", err, g.String()) + logrus.Warnf("falling back to single mapping") + g.Reset() + g.Write([]byte(fmt.Sprintf("0 %d 1\n", os.Getegid()))) + } + } + if !gidmapSet { + if c.UseNewgidmap { + setgroups, err := os.OpenFile(fmt.Sprintf("/proc/%s/setgroups", pidString), os.O_TRUNC|os.O_WRONLY, 0) + if err != nil { + fmt.Fprintf(continueWrite, "error opening /proc/%s/setgroups: %v", pidString, err) + return errors.Wrapf(err, "error opening /proc/%s/setgroups", pidString) + } + defer setgroups.Close() + if _, err := fmt.Fprintf(setgroups, "deny"); err != nil { + fmt.Fprintf(continueWrite, "error writing 'deny' to /proc/%s/setgroups: %v", pidString, err) + return errors.Wrapf(err, "error writing 'deny' to /proc/%s/setgroups", pidString) + } + } + gidmap, err := os.OpenFile(fmt.Sprintf("/proc/%s/gid_map", pidString), os.O_TRUNC|os.O_WRONLY, 0) + if err != nil { + fmt.Fprintf(continueWrite, "error opening /proc/%s/gid_map: %v", pidString, err) + return errors.Wrapf(err, "error opening /proc/%s/gid_map", pidString) + } + defer gidmap.Close() + if _, err := fmt.Fprintf(gidmap, "%s", g.String()); err != nil { + fmt.Fprintf(continueWrite, "error writing %q to /proc/%s/gid_map: %v", g.String(), pidString, err) + return errors.Wrapf(err, "error writing %q to /proc/%s/gid_map", g.String(), pidString) + } + } + } + + if len(c.UidMappings) > 0 { + // Build the UID map, since writing to the proc file has to be done all at once. + u := new(bytes.Buffer) + for _, m := range c.UidMappings { + fmt.Fprintf(u, "%d %d %d\n", m.ContainerID, m.HostID, m.Size) + } + uidmapSet := false + // Set the GID map. + if c.UseNewuidmap { + cmd := exec.Command("newuidmap", append([]string{pidString}, strings.Fields(strings.Replace(u.String(), "\n", " ", -1))...)...) + u.Reset() + cmd.Stdout = u + cmd.Stderr = u + err := cmd.Run() + if err == nil { + uidmapSet = true + } else { + logrus.Warnf("error running newuidmap: %v: %s", err, u.String()) + logrus.Warnf("falling back to single mapping") + u.Reset() + u.Write([]byte(fmt.Sprintf("0 %d 1\n", os.Geteuid()))) + } + } + if !uidmapSet { + uidmap, err := os.OpenFile(fmt.Sprintf("/proc/%s/uid_map", pidString), os.O_TRUNC|os.O_WRONLY, 0) + if err != nil { + fmt.Fprintf(continueWrite, "error opening /proc/%s/uid_map: %v", pidString, err) + return errors.Wrapf(err, "error opening /proc/%s/uid_map", pidString) + } + defer uidmap.Close() + if _, err := fmt.Fprintf(uidmap, "%s", u.String()); err != nil { + fmt.Fprintf(continueWrite, "error writing %q to /proc/%s/uid_map: %v", u.String(), pidString, err) + return errors.Wrapf(err, "error writing %q to /proc/%s/uid_map", u.String(), pidString) + } + } + } + } + + if c.OOMScoreAdj != nil { + oomScoreAdj, err := os.OpenFile(fmt.Sprintf("/proc/%s/oom_score_adj", pidString), os.O_TRUNC|os.O_WRONLY, 0) + if err != nil { + fmt.Fprintf(continueWrite, "error opening oom_score_adj: %v", err) + return errors.Wrapf(err, "error opening /proc/%s/oom_score_adj", pidString) + } + defer oomScoreAdj.Close() + if _, err := fmt.Fprintf(oomScoreAdj, "%d\n", *c.OOMScoreAdj); err != nil { + fmt.Fprintf(continueWrite, "error writing \"%d\" to oom_score_adj: %v", c.OOMScoreAdj, err) + return errors.Wrapf(err, "error writing \"%d\" to /proc/%s/oom_score_adj", c.OOMScoreAdj, pidString) + } + } + // Run any additional setup that we want to do before the child starts running proper. 
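+	// The child is still blocked on the continue pipe at this point: on any
+	// failure above or in the Hook the parent reports the error through the
+	// pipe, while on success the deferred cleanup closes it and the child
+	// proceeds.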
+ if c.Hook != nil { + if err = c.Hook(pid); err != nil { + fmt.Fprintf(continueWrite, "hook error: %v", err) + return err + } + } + + return nil +} + +func (c *Cmd) Run() error { + if err := c.Start(); err != nil { + return err + } + return c.Wait() +} + +func (c *Cmd) CombinedOutput() ([]byte, error) { + return nil, errors.New("unshare: CombinedOutput() not implemented") +} + +func (c *Cmd) Output() ([]byte, error) { + return nil, errors.New("unshare: Output() not implemented") +} + +var ( + isRootlessOnce sync.Once + isRootless bool +) + +const ( + // UsernsEnvName is the environment variable, if set indicates in rootless mode + UsernsEnvName = "_CONTAINERS_USERNS_CONFIGURED" +) + +// IsRootless tells us if we are running in rootless mode +func IsRootless() bool { + isRootlessOnce.Do(func() { + isRootless = os.Geteuid() != 0 || os.Getenv(UsernsEnvName) != "" + }) + return isRootless +} + +// GetRootlessUID returns the UID of the user in the parent userNS +func GetRootlessUID() int { + uidEnv := os.Getenv("_CONTAINERS_ROOTLESS_UID") + if uidEnv != "" { + u, _ := strconv.Atoi(uidEnv) + return u + } + return os.Getuid() +} + +// RootlessEnv returns the environment settings for the rootless containers +func RootlessEnv() []string { + return append(os.Environ(), UsernsEnvName+"=done") +} + +type Runnable interface { + Run() error +} + +func bailOnError(err error, format string, a ...interface{}) { + if err != nil { + if format != "" { + logrus.Errorf("%s: %v", fmt.Sprintf(format, a...), err) + } else { + logrus.Errorf("%v", err) + } + os.Exit(1) + } +} + +// MaybeReexecUsingUserNamespace re-exec the process in a new namespace +func MaybeReexecUsingUserNamespace(evenForRoot bool) { + // If we've already been through this once, no need to try again. + if os.Geteuid() == 0 && IsRootless() { + return + } + + var uidNum, gidNum uint64 + // Figure out who we are. + me, err := user.Current() + if !os.IsNotExist(err) { + bailOnError(err, "error determining current user") + uidNum, err = strconv.ParseUint(me.Uid, 10, 32) + bailOnError(err, "error parsing current UID %s", me.Uid) + gidNum, err = strconv.ParseUint(me.Gid, 10, 32) + bailOnError(err, "error parsing current GID %s", me.Gid) + } + + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + // ID mappings to use to reexec ourselves. + var uidmap, gidmap []specs.LinuxIDMapping + if uidNum != 0 || evenForRoot { + // Read the set of ID mappings that we're allowed to use. Each + // range in /etc/subuid and /etc/subgid file is a starting host + // ID and a range size. + uidmap, gidmap, err = GetSubIDMappings(me.Username, me.Username) + if err != nil { + logrus.Warnf("error reading allowed ID mappings: %v", err) + } + if len(uidmap) == 0 { + logrus.Warnf("Found no UID ranges set aside for user %q in /etc/subuid.", me.Username) + } + if len(gidmap) == 0 { + logrus.Warnf("Found no GID ranges set aside for user %q in /etc/subgid.", me.Username) + } + // Map our UID and GID, then the subuid and subgid ranges, + // consecutively, starting at 0, to get the mappings to use for + // a copy of ourselves. + uidmap = append([]specs.LinuxIDMapping{{HostID: uint32(uidNum), ContainerID: 0, Size: 1}}, uidmap...) + gidmap = append([]specs.LinuxIDMapping{{HostID: uint32(gidNum), ContainerID: 0, Size: 1}}, gidmap...) 
+ var rangeStart uint32 + for i := range uidmap { + uidmap[i].ContainerID = rangeStart + rangeStart += uidmap[i].Size + } + rangeStart = 0 + for i := range gidmap { + gidmap[i].ContainerID = rangeStart + rangeStart += gidmap[i].Size + } + } else { + // If we have CAP_SYS_ADMIN, then we don't need to create a new namespace in order to be able + // to use unshare(), so don't bother creating a new user namespace at this point. + capabilities, err := capability.NewPid(0) + bailOnError(err, "error reading the current capabilities sets") + if capabilities.Get(capability.EFFECTIVE, capability.CAP_SYS_ADMIN) { + return + } + // Read the set of ID mappings that we're currently using. + uidmap, gidmap, err = GetHostIDMappings("") + bailOnError(err, "error reading current ID mappings") + // Just reuse them. + for i := range uidmap { + uidmap[i].HostID = uidmap[i].ContainerID + } + for i := range gidmap { + gidmap[i].HostID = gidmap[i].ContainerID + } + } + + // Unlike most uses of reexec or unshare, we're using a name that + // _won't_ be recognized as a registered reexec handler, since we + // _want_ to fall through reexec.Init() to the normal main(). + cmd := Command(append([]string{fmt.Sprintf("%s-in-a-user-namespace", os.Args[0])}, os.Args[1:]...)...) + + // If, somehow, we don't become UID 0 in our child, indicate that the child shouldn't try again. + err = os.Setenv(UsernsEnvName, "1") + bailOnError(err, "error setting %s=1 in environment", UsernsEnvName) + + // Set the default isolation type to use the "rootless" method. + if _, present := os.LookupEnv("BUILDAH_ISOLATION"); !present { + if err = os.Setenv("BUILDAH_ISOLATION", "rootless"); err != nil { + if err := os.Setenv("BUILDAH_ISOLATION", "rootless"); err != nil { + logrus.Errorf("error setting BUILDAH_ISOLATION=rootless in environment: %v", err) + os.Exit(1) + } + } + } + + // Reuse our stdio. + cmd.Stdin = os.Stdin + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + + // Set up a new user namespace with the ID mapping. + cmd.UnshareFlags = syscall.CLONE_NEWUSER | syscall.CLONE_NEWNS + cmd.UseNewuidmap = uidNum != 0 + cmd.UidMappings = uidmap + cmd.UseNewgidmap = uidNum != 0 + cmd.GidMappings = gidmap + cmd.GidMappingsEnableSetgroups = true + + // Finish up. + logrus.Debugf("running %+v with environment %+v, UID map %+v, and GID map %+v", cmd.Cmd.Args, os.Environ(), cmd.UidMappings, cmd.GidMappings) + ExecRunnable(cmd) +} + +// ExecRunnable runs the specified unshare command, captures its exit status, +// and exits with the same status. +func ExecRunnable(cmd Runnable) { + if err := cmd.Run(); err != nil { + if exitError, ok := errors.Cause(err).(*exec.ExitError); ok { + if exitError.ProcessState.Exited() { + if waitStatus, ok := exitError.ProcessState.Sys().(syscall.WaitStatus); ok { + if waitStatus.Exited() { + logrus.Errorf("%v", exitError) + os.Exit(waitStatus.ExitStatus()) + } + if waitStatus.Signaled() { + logrus.Errorf("%v", exitError) + os.Exit(int(waitStatus.Signal()) + 128) + } + } + } + } + logrus.Errorf("%v", err) + logrus.Errorf("(unable to determine exit status)") + os.Exit(1) + } + os.Exit(0) +} + +// getHostIDMappings reads mappings from the named node under /proc. 
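+// Each line of such a file ("uid_map"/"gid_map" under /proc/<pid>) carries
+// three numeric fields: the container-side start ID, the host-side start ID,
+// and the length of the mapped range.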
+func getHostIDMappings(path string) ([]specs.LinuxIDMapping, error) { + var mappings []specs.LinuxIDMapping + f, err := os.Open(path) + if err != nil { + return nil, errors.Wrapf(err, "error reading ID mappings from %q", path) + } + defer f.Close() + scanner := bufio.NewScanner(f) + for scanner.Scan() { + line := scanner.Text() + fields := strings.Fields(line) + if len(fields) != 3 { + return nil, errors.Errorf("line %q from %q has %d fields, not 3", line, path, len(fields)) + } + cid, err := strconv.ParseUint(fields[0], 10, 32) + if err != nil { + return nil, errors.Wrapf(err, "error parsing container ID value %q from line %q in %q", fields[0], line, path) + } + hid, err := strconv.ParseUint(fields[1], 10, 32) + if err != nil { + return nil, errors.Wrapf(err, "error parsing host ID value %q from line %q in %q", fields[1], line, path) + } + size, err := strconv.ParseUint(fields[2], 10, 32) + if err != nil { + return nil, errors.Wrapf(err, "error parsing size value %q from line %q in %q", fields[2], line, path) + } + mappings = append(mappings, specs.LinuxIDMapping{ContainerID: uint32(cid), HostID: uint32(hid), Size: uint32(size)}) + } + return mappings, nil +} + +// GetHostIDMappings reads mappings for the specified process (or the current +// process if pid is "self" or an empty string) from the kernel. +func GetHostIDMappings(pid string) ([]specs.LinuxIDMapping, []specs.LinuxIDMapping, error) { + if pid == "" { + pid = "self" + } + uidmap, err := getHostIDMappings(fmt.Sprintf("/proc/%s/uid_map", pid)) + if err != nil { + return nil, nil, err + } + gidmap, err := getHostIDMappings(fmt.Sprintf("/proc/%s/gid_map", pid)) + if err != nil { + return nil, nil, err + } + return uidmap, gidmap, nil +} + +// GetSubIDMappings reads mappings from /etc/subuid and /etc/subgid. +func GetSubIDMappings(user, group string) ([]specs.LinuxIDMapping, []specs.LinuxIDMapping, error) { + mappings, err := idtools.NewIDMappings(user, group) + if err != nil { + return nil, nil, errors.Wrapf(err, "error reading subuid mappings for user %q and subgid mappings for group %q", user, group) + } + var uidmap, gidmap []specs.LinuxIDMapping + for _, m := range mappings.UIDs() { + uidmap = append(uidmap, specs.LinuxIDMapping{ + ContainerID: uint32(m.ContainerID), + HostID: uint32(m.HostID), + Size: uint32(m.Size), + }) + } + for _, m := range mappings.GIDs() { + gidmap = append(gidmap, specs.LinuxIDMapping{ + ContainerID: uint32(m.ContainerID), + HostID: uint32(m.HostID), + Size: uint32(m.Size), + }) + } + return uidmap, gidmap, nil +} + +// ParseIDMappings parses mapping triples. 
+func ParseIDMappings(uidmap, gidmap []string) ([]idtools.IDMap, []idtools.IDMap, error) { + uid, err := idtools.ParseIDMap(uidmap, "userns-uid-map") + if err != nil { + return nil, nil, err + } + gid, err := idtools.ParseIDMap(gidmap, "userns-gid-map") + if err != nil { + return nil, nil, err + } + return uid, gid, nil +} diff --git a/vendor/github.com/containers/buildah/pkg/unshare/unshare_cgo.go b/vendor/github.com/containers/buildah/pkg/unshare/unshare_cgo.go new file mode 100644 index 0000000..b3f8099 --- /dev/null +++ b/vendor/github.com/containers/buildah/pkg/unshare/unshare_cgo.go @@ -0,0 +1,10 @@ +// +build linux,cgo,!gccgo + +package unshare + +// #cgo CFLAGS: -Wall +// extern void _containers_unshare(void); +// void __attribute__((constructor)) init(void) { +// _containers_unshare(); +// } +import "C" diff --git a/vendor/github.com/containers/buildah/pkg/unshare/unshare_gccgo.go b/vendor/github.com/containers/buildah/pkg/unshare/unshare_gccgo.go new file mode 100644 index 0000000..2f95da7 --- /dev/null +++ b/vendor/github.com/containers/buildah/pkg/unshare/unshare_gccgo.go @@ -0,0 +1,25 @@ +// +build linux,cgo,gccgo + +package unshare + +// #cgo CFLAGS: -Wall -Wextra +// extern void _containers_unshare(void); +// void __attribute__((constructor)) init(void) { +// _containers_unshare(); +// } +import "C" + +// This next bit is straight out of libcontainer. + +// AlwaysFalse is here to stay false +// (and be exported so the compiler doesn't optimize out its reference) +var AlwaysFalse bool + +func init() { + if AlwaysFalse { + // by referencing this C init() in a noop test, it will ensure the compiler + // links in the C function. + // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=65134 + C.init() + } +} diff --git a/vendor/github.com/containers/buildah/pkg/unshare/unshare_unsupported.go b/vendor/github.com/containers/buildah/pkg/unshare/unshare_unsupported.go new file mode 100644 index 0000000..bf4d567 --- /dev/null +++ b/vendor/github.com/containers/buildah/pkg/unshare/unshare_unsupported.go @@ -0,0 +1,45 @@ +// +build !linux + +package unshare + +import ( + "os" + + "github.com/containers/storage/pkg/idtools" + "github.com/opencontainers/runtime-spec/specs-go" +) + +const ( + // UsernsEnvName is the environment variable, if set indicates in rootless mode + UsernsEnvName = "_CONTAINERS_USERNS_CONFIGURED" +) + +// IsRootless tells us if we are running in rootless mode +func IsRootless() bool { + return false +} + +// GetRootlessUID returns the UID of the user in the parent userNS +func GetRootlessUID() int { + return os.Getuid() +} + +// RootlessEnv returns the environment settings for the rootless containers +func RootlessEnv() []string { + return append(os.Environ(), UsernsEnvName+"=") +} + +// MaybeReexecUsingUserNamespace re-exec the process in a new namespace +func MaybeReexecUsingUserNamespace(evenForRoot bool) { +} + +// GetHostIDMappings reads mappings for the specified process (or the current +// process if pid is "self" or an empty string) from the kernel. +func GetHostIDMappings(pid string) ([]specs.LinuxIDMapping, []specs.LinuxIDMapping, error) { + return nil, nil, nil +} + +// ParseIDMappings parses mapping triples. 
+func ParseIDMappings(uidmap, gidmap []string) ([]idtools.IDMap, []idtools.IDMap, error) { + return nil, nil, nil +} diff --git a/vendor/github.com/containers/buildah/vendor.conf b/vendor/github.com/containers/buildah/vendor.conf new file mode 100644 index 0000000..8814894 --- /dev/null +++ b/vendor/github.com/containers/buildah/vendor.conf @@ -0,0 +1,69 @@ +github.com/Azure/go-ansiterm d6e3b3328b783f23731bc4d058875b0371ff8109 +github.com/blang/semver v3.5.0 +github.com/BurntSushi/toml v0.2.0 +github.com/containerd/continuity 004b46473808b3e7a4a3049c20e4376c91eb966d +github.com/containernetworking/cni v0.7.0-rc2 +github.com/containers/image v2.0.0 +github.com/cyphar/filepath-securejoin v0.2.1 +github.com/vbauerster/mpb v3.3.4 +github.com/mattn/go-isatty v0.0.4 +github.com/VividCortex/ewma v1.1.1 +github.com/containers/storage v1.12.10 +github.com/docker/distribution 5f6282db7d65e6d72ad7c2cc66310724a57be716 +github.com/docker/docker 54dddadc7d5d89fe0be88f76979f6f6ab0dede83 +github.com/docker/docker-credential-helpers v0.6.1 +github.com/docker/go-connections v0.4.0 +github.com/docker/go-units v0.3.2 +github.com/docker/libtrust aabc10ec26b754e797f9028f4589c5b7bd90dc20 +github.com/docker/libnetwork 1a06131fb8a047d919f7deaf02a4c414d7884b83 +github.com/fsouza/go-dockerclient v1.3.0 +github.com/ghodss/yaml v1.0.0 +github.com/gogo/protobuf v1.2.0 +github.com/gorilla/context v1.1.1 +github.com/gorilla/mux v1.6.2 +github.com/hashicorp/errwrap v1.0.0 +github.com/hashicorp/go-multierror v1.0.0 +github.com/imdario/mergo v0.3.6 +github.com/mattn/go-shellwords v1.0.3 +github.com/Microsoft/go-winio v0.4.11 +github.com/Microsoft/hcsshim v0.8.3 +github.com/mistifyio/go-zfs v2.1.1 +github.com/moby/moby f8806b18b4b92c5e1980f6e11c917fad201cd73c +github.com/mtrmac/gpgme b2432428689ca58c2b8e8dea9449d3295cf96fc9 +# TODO: Gotty has not been updated since 2012. Can we find a replacement? 
+github.com/Nvveen/Gotty cd527374f1e5bff4938207604a14f2e38a9cf512 +github.com/opencontainers/go-digest c9281466c8b2f606084ac71339773efd177436e7 +github.com/opencontainers/image-spec v1.0.0 +github.com/opencontainers/runc v1.0.0-rc6 +github.com/opencontainers/runtime-spec v1.0.0 +github.com/opencontainers/runtime-tools v0.8.0 +github.com/opencontainers/selinux v1.1 +github.com/openshift/imagebuilder v1.1.0 +github.com/ostreedev/ostree-go 9ab99253d365aac3a330d1f7281cf29f3d22820b +github.com/pkg/errors v0.8.1 +github.com/pquerna/ffjson d49c2bc1aa135aad0c6f4fc2056623ec78f5d5ac +github.com/seccomp/libseccomp-golang v0.9.0 +github.com/seccomp/containers-golang v0.1 +github.com/sirupsen/logrus v1.0.0 +github.com/syndtr/gocapability d98352740cb2c55f81556b63d4a1ec64c5a319c2 +github.com/tchap/go-patricia v2.2.6 +github.com/ulikunitz/xz v0.5.5 +github.com/vbatts/tar-split v0.10.2 +github.com/xeipuuv/gojsonpointer 4e3ac2762d5f479393488629ee9370b50873b3a6 +github.com/xeipuuv/gojsonreference bd5ef7bd5415a7ac448318e64f11a24cd21e594b +github.com/xeipuuv/gojsonschema v1.1.0 +golang.org/x/crypto ff983b9c42bc9fbf91556e191cc8efb585c16908 https://github.com/golang/crypto +golang.org/x/net 45ffb0cd1ba084b73e26dee67e667e1be5acce83 https://github.com/golang/net +golang.org/x/sync 37e7f081c4d4c64e13b10787722085407fe5d15f https://github.com/golang/sync +golang.org/x/sys 7fbe1cd0fcc20051e1fcb87fbabec4a1bacaaeba https://github.com/golang/sys +golang.org/x/text e6919f6577db79269a6443b9dc46d18f2238fb5d https://github.com/golang/text +gopkg.in/yaml.v2 v2.2.2 +k8s.io/client-go kubernetes-1.10.13-beta.0 https://github.com/kubernetes/client-go +github.com/klauspost/pgzip v1.2.1 +github.com/klauspost/compress v1.4.1 +github.com/klauspost/cpuid v1.2.0 +github.com/onsi/gomega v1.4.3 +github.com/spf13/cobra v0.0.3 +github.com/spf13/pflag v1.0.3 +github.com/ishidawataru/sctp 07191f837fedd2f13d1ec7b5f885f0f3ec54b1cb +github.com/etcd-io/bbolt v1.3.2 diff --git a/vendor/github.com/containers/image/LICENSE b/vendor/github.com/containers/image/LICENSE new file mode 100644 index 0000000..9535635 --- /dev/null +++ b/vendor/github.com/containers/image/LICENSE @@ -0,0 +1,189 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       https://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/containers/image/README.md b/vendor/github.com/containers/image/README.md
new file mode 100644
index 0000000..571e834
--- /dev/null
+++ b/vendor/github.com/containers/image/README.md
@@ -0,0 +1,83 @@
+[![GoDoc](https://godoc.org/github.com/containers/image?status.svg)](https://godoc.org/github.com/containers/image) [![Build Status](https://travis-ci.org/containers/image.svg?branch=master)](https://travis-ci.org/containers/image)
+=
+
+`image` is a set of Go libraries aimed at working in various ways with
+containers' images and container image registries.
+
+The containers/image library allows applications to pull and push images from
+container image registries, like the upstream docker registry. It also
+implements "simple image signing".
+
+The containers/image library also allows you to inspect a repository on a
+container registry without pulling down the image. This means it fetches the
+repository's manifest and it is able to show you a `docker inspect`-like json
+output about a whole repository or a tag. This library, in contrast to `docker
+inspect`, helps you gather useful information about a repository or a tag
+without requiring you to run `docker pull`.
+
+The containers/image library also allows you to translate from one image format
+to another, for example docker container images to OCI images. It also allows
+you to copy container images between various registries, possibly converting
+them as necessary, and to sign and verify images.
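+
+For illustration, a minimal sketch of the inspect-without-pull use case might
+look like the following (the image name is only an example, `nil` is used for
+the optional system context, and error handling is kept minimal):
+
+```go
+package main
+
+import (
+	"context"
+	"fmt"
+	"log"
+
+	"github.com/containers/image/transports/alltransports"
+)
+
+func main() {
+	ctx := context.Background()
+	// The "docker://" transport prefix selects the registry backend.
+	ref, err := alltransports.ParseImageName("docker://docker.io/library/busybox:latest")
+	if err != nil {
+		log.Fatal(err)
+	}
+	// NewImage fetches the manifest and config, but no layer blobs.
+	img, err := ref.NewImage(ctx, nil)
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer img.Close()
+	info, err := img.Inspect(ctx)
+	if err != nil {
+		log.Fatal(err)
+	}
+	fmt.Println(info.Architecture, info.Os, info.Layers)
+}
+```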
+ +## Command-line usage + +The containers/image project is only a library with no user interface; +you can either incorporate it into your Go programs, or use the `skopeo` tool: + +The [skopeo](https://github.com/containers/skopeo) tool uses the +containers/image library and takes advantage of many of its features, +e.g. `skopeo copy` exposes the `containers/image/copy.Image` functionality. + +## Dependencies + +This library does not ship a committed version of its dependencies in a `vendor` +subdirectory. This is so you can make well-informed decisions about which +libraries you should use with this package in your own projects, and because +types defined in the `vendor` directory would be impossible to use from your projects. + +What this project tests against dependencies-wise is located +[in vendor.conf](https://github.com/containers/image/blob/master/vendor.conf). + +## Building + +If you want to see what the library can do, or an example of how it is called, +consider starting with the [skopeo](https://github.com/containers/skopeo) tool +instead. + +To integrate this library into your project, put it into `$GOPATH` or use +your preferred vendoring tool to include a copy in your project. +Ensure that the dependencies documented [in vendor.conf](https://github.com/containers/image/blob/master/vendor.conf) +are also available +(using those exact versions or different versions of your choosing). + +This library, by default, also depends on the GpgME and libostree C libraries. Either install them: +```sh +Fedora$ dnf install gpgme-devel libassuan-devel ostree-devel +macOS$ brew install gpgme +``` +or use the build tags described below to avoid the dependencies (e.g. using `go build -tags …`) + +### Supported build tags + +- `containers_image_openpgp`: Use a Golang-only OpenPGP implementation for signature verification instead of the default cgo/gpgme-based implementation; +the primary downside is that creating new signatures with the Golang-only implementation is not supported. +- `containers_image_ostree_stub`: Instead of importing `ostree:` transport in `github.com/containers/image/transports/alltransports`, use a stub which reports that the transport is not supported. This allows building the library without requiring the `libostree` development libraries. The `github.com/containers/image/ostree` package is completely disabled +and impossible to import when this build tag is in use. + +## [Contributing](CONTRIBUTING.md) + +Information about contributing to this project. + +When developing this library, please use `make` (or `make … BUILDTAGS=…`) to take advantage of the tests and validation. 
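+
+## Example
+
+As a rough sketch of library use (not a complete program template), copying an
+image from a registry into a local `docker-archive:` tarball follows the same
+steps `skopeo copy` performs: parse the two references, build a signature
+policy context, and call `copy.Image`. The image name and output path below
+are only examples:
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+
+	"github.com/containers/image/copy"
+	"github.com/containers/image/signature"
+	"github.com/containers/image/transports/alltransports"
+)
+
+func main() {
+	srcRef, err := alltransports.ParseImageName("docker://docker.io/library/busybox:latest")
+	if err != nil {
+		log.Fatal(err)
+	}
+	destRef, err := alltransports.ParseImageName("docker-archive:/tmp/busybox.tar")
+	if err != nil {
+		log.Fatal(err)
+	}
+	// DefaultPolicy reads the system-wide signature policy (e.g. /etc/containers/policy.json).
+	policy, err := signature.DefaultPolicy(nil)
+	if err != nil {
+		log.Fatal(err)
+	}
+	policyContext, err := signature.NewPolicyContext(policy)
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer policyContext.Destroy()
+	// copy.Image validates the source against the policy and streams it to the destination.
+	if _, err := copy.Image(context.Background(), policyContext, destRef, srcRef, &copy.Options{}); err != nil {
+		log.Fatal(err)
+	}
+}
+```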
+
+## License
+
+Apache License 2.0
+
+SPDX-License-Identifier: Apache-2.0
+
+## Contact
+
+- Mailing list: [containers-dev](https://groups.google.com/forum/?hl=en#!forum/containers-dev)
+- IRC: #[container-projects](irc://irc.freenode.net:6667/#container-projects) on freenode.net
diff --git a/vendor/github.com/containers/image/copy/copy.go b/vendor/github.com/containers/image/copy/copy.go
new file mode 100644
index 0000000..3ed8a2b
--- /dev/null
+++ b/vendor/github.com/containers/image/copy/copy.go
@@ -0,0 +1,903 @@
+package copy
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"reflect"
+	"runtime"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/containers/image/docker/reference"
+	"github.com/containers/image/image"
+	"github.com/containers/image/manifest"
+	"github.com/containers/image/pkg/blobinfocache"
+	"github.com/containers/image/pkg/compression"
+	"github.com/containers/image/signature"
+	"github.com/containers/image/transports"
+	"github.com/containers/image/types"
+	"github.com/klauspost/pgzip"
+	digest "github.com/opencontainers/go-digest"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+	"github.com/vbauerster/mpb"
+	"github.com/vbauerster/mpb/decor"
+	"golang.org/x/crypto/ssh/terminal"
+	"golang.org/x/sync/semaphore"
+)
+
+type digestingReader struct {
+	source              io.Reader
+	digester            digest.Digester
+	expectedDigest      digest.Digest
+	validationFailed    bool
+	validationSucceeded bool
+}
+
+// maxParallelDownloads is used to limit the maximum number of parallel
+// downloads. Let's follow Firefox by limiting it to 6.
+var maxParallelDownloads = 6
+
+// newDigestingReader returns an io.Reader implementation with contents of source, which will eventually return a non-EOF error
+// or set validationSucceeded/validationFailed to true if the source stream does/does not match expectedDigest.
+// (neither is set if EOF is never reached).
+func newDigestingReader(source io.Reader, expectedDigest digest.Digest) (*digestingReader, error) {
+	if err := expectedDigest.Validate(); err != nil {
+		return nil, errors.Errorf("Invalid digest specification %s", expectedDigest)
+	}
+	digestAlgorithm := expectedDigest.Algorithm()
+	if !digestAlgorithm.Available() {
+		return nil, errors.Errorf("Invalid digest specification %s: unsupported digest algorithm %s", expectedDigest, digestAlgorithm)
+	}
+	return &digestingReader{
+		source:           source,
+		digester:         digestAlgorithm.Digester(),
+		expectedDigest:   expectedDigest,
+		validationFailed: false,
+	}, nil
+}
+
+func (d *digestingReader) Read(p []byte) (int, error) {
+	n, err := d.source.Read(p)
+	if n > 0 {
+		if n2, err := d.digester.Hash().Write(p[:n]); n2 != n || err != nil {
+			// Coverage: This should not happen, the hash.Hash interface requires
+			// d.digest.Write to never return an error, and the io.Writer interface
+			// requires n2 == len(input) if no error is returned.
+			return 0, errors.Wrapf(err, "Error updating digest during verification: %d vs. %d", n2, n)
+		}
+	}
+	if err == io.EOF {
+		actualDigest := d.digester.Digest()
+		if actualDigest != d.expectedDigest {
+			d.validationFailed = true
+			return 0, errors.Errorf("Digest did not match, expected %s, got %s", d.expectedDigest, actualDigest)
+		}
+		d.validationSucceeded = true
+	}
+	return n, err
+}
+
+// copier allows us to keep track of diffID values for blobs, and other
+// data shared across one or more images in a possible manifest list.
+type copier struct {
+	dest             types.ImageDestination
+	rawSource        types.ImageSource
+	reportWriter     io.Writer
+	progressOutput   io.Writer
+	progressInterval time.Duration
+	progress         chan types.ProgressProperties
+	blobInfoCache    types.BlobInfoCache
+	copyInParallel   bool
+}
+
+// imageCopier tracks state specific to a single image (possibly an item of a manifest list)
+type imageCopier struct {
+	c                  *copier
+	manifestUpdates    *types.ManifestUpdateOptions
+	src                types.Image
+	diffIDsAreNeeded   bool
+	canModifyManifest  bool
+	canSubstituteBlobs bool
+}
+
+// Options allows supplying non-default configuration modifying the behavior of CopyImage.
+type Options struct {
+	RemoveSignatures bool   // Remove any pre-existing signatures. SignBy will still add a new signature.
+	SignBy           string // If non-empty, asks for a signature to be added during the copy, and specifies a key ID, as accepted by signature.NewGPGSigningMechanism().SignDockerManifest(),
+	ReportWriter     io.Writer
+	SourceCtx        *types.SystemContext
+	DestinationCtx   *types.SystemContext
+	ProgressInterval time.Duration                 // time to wait between reports to signal the progress channel
+	Progress         chan types.ProgressProperties // Reported to when ProgressInterval has arrived for a single artifact+offset.
+	// manifest MIME type of image set by user. "" is default and means use autodetection to determine the manifest MIME type
+	ForceManifestMIMEType string
+}
+
+// Image copies image from srcRef to destRef, using policyContext to validate
+// source image admissibility. It returns the manifest which was written to
+// the new copy of the image.
+func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef, srcRef types.ImageReference, options *Options) (manifest []byte, retErr error) {
+	// NOTE this function uses an output parameter for the error return value.
+	// Setting this and returning is the ideal way to return an error.
+	//
+	// the defers in this routine will wrap the error return with its own errors
+	// which can be valuable context in the middle of a multi-streamed copy.
+	if options == nil {
+		options = &Options{}
+	}
+
+	reportWriter := ioutil.Discard
+
+	if options.ReportWriter != nil {
+		reportWriter = options.ReportWriter
+	}
+
+	dest, err := destRef.NewImageDestination(ctx, options.DestinationCtx)
+	if err != nil {
+		return nil, errors.Wrapf(err, "Error initializing destination %s", transports.ImageName(destRef))
+	}
+	defer func() {
+		if err := dest.Close(); err != nil {
+			retErr = errors.Wrapf(retErr, " (dest: %v)", err)
+		}
+	}()
+
+	rawSource, err := srcRef.NewImageSource(ctx, options.SourceCtx)
+	if err != nil {
+		return nil, errors.Wrapf(err, "Error initializing source %s", transports.ImageName(srcRef))
+	}
+	defer func() {
+		if err := rawSource.Close(); err != nil {
+			retErr = errors.Wrapf(retErr, " (src: %v)", err)
+		}
+	}()
+
+	// If reportWriter is not a TTY (e.g., when piping to a file), do not
+	// print the progress bars to avoid long and hard to parse output.
+	// createProgressBar() will print a single line instead.
+	progressOutput := reportWriter
+	if !isTTY(reportWriter) {
+		progressOutput = ioutil.Discard
+	}
+	copyInParallel := dest.HasThreadSafePutBlob() && rawSource.HasThreadSafeGetBlob()
+	c := &copier{
+		dest:             dest,
+		rawSource:        rawSource,
+		reportWriter:     reportWriter,
+		progressOutput:   progressOutput,
+		progressInterval: options.ProgressInterval,
+		progress:         options.Progress,
+		copyInParallel:   copyInParallel,
+		// FIXME?
The cache is used for sources and destinations equally, but we only have a SourceCtx and DestinationCtx.
+		// For now, use DestinationCtx (because blob reuse changes the behavior of the destination side more); eventually
+		// we might want to add a separate CommonCtx — or would that be too confusing?
+		blobInfoCache: blobinfocache.DefaultCache(options.DestinationCtx),
+	}
+
+	unparsedToplevel := image.UnparsedInstance(rawSource, nil)
+	multiImage, err := isMultiImage(ctx, unparsedToplevel)
+	if err != nil {
+		return nil, errors.Wrapf(err, "Error determining manifest MIME type for %s", transports.ImageName(srcRef))
+	}
+
+	if !multiImage {
+		// The simple case: Just copy a single image.
+		if manifest, err = c.copyOneImage(ctx, policyContext, options, unparsedToplevel); err != nil {
+			return nil, err
+		}
+	} else {
+		// This is a manifest list. Choose a single image and copy it.
+		// FIXME: Copy to destinations which support manifest lists, one image at a time.
+		instanceDigest, err := image.ChooseManifestInstanceFromManifestList(ctx, options.SourceCtx, unparsedToplevel)
+		if err != nil {
+			return nil, errors.Wrapf(err, "Error choosing an image from manifest list %s", transports.ImageName(srcRef))
+		}
+		logrus.Debugf("Source is a manifest list; copying (only) instance %s", instanceDigest)
+		unparsedInstance := image.UnparsedInstance(rawSource, &instanceDigest)
+
+		if manifest, err = c.copyOneImage(ctx, policyContext, options, unparsedInstance); err != nil {
+			return nil, err
+		}
+	}
+
+	if err := c.dest.Commit(ctx); err != nil {
+		return nil, errors.Wrap(err, "Error committing the finished image")
+	}
+
+	return manifest, nil
+}
+
+// copyOneImage copies a single (non-manifest-list) image unparsedImage, using policyContext to validate
+// source image admissibility.
+func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.PolicyContext, options *Options, unparsedImage *image.UnparsedImage) (manifestBytes []byte, retErr error) {
+	// The caller is handling manifest lists; this could happen only if a manifest list contains a manifest list.
+	// Make sure we fail cleanly in such cases.
+	multiImage, err := isMultiImage(ctx, unparsedImage)
+	if err != nil {
+		// FIXME FIXME: How to name a reference for the sub-image?
+		return nil, errors.Wrapf(err, "Error determining manifest MIME type for %s", transports.ImageName(unparsedImage.Reference()))
+	}
+	if multiImage {
+		return nil, fmt.Errorf("Unexpectedly received a manifest list instead of a manifest for a single image")
+	}
+
+	// Please keep this policy check BEFORE reading any other information about the image.
+	// (the multiImage check above only matches the MIME type, which we have received anyway.
+	// Actual parsing of anything should be deferred.)
+	if allowed, err := policyContext.IsRunningImageAllowed(ctx, unparsedImage); !allowed || err != nil { // Be paranoid and fail if either return value indicates so.
+		return nil, errors.Wrap(err, "Source image rejected")
+	}
+	src, err := image.FromUnparsedImage(ctx, options.SourceCtx, unparsedImage)
+	if err != nil {
+		return nil, errors.Wrapf(err, "Error initializing image from source %s", transports.ImageName(c.rawSource.Reference()))
+	}
+
+	// If the destination is a digested reference, make a note of that, determine what digest value we're
+	// expecting, and check that the source manifest matches it.
+ destIsDigestedReference := false + if named := c.dest.Reference().DockerReference(); named != nil { + if digested, ok := named.(reference.Digested); ok { + destIsDigestedReference = true + sourceManifest, _, err := src.Manifest(ctx) + if err != nil { + return nil, errors.Wrapf(err, "Error reading manifest from source image") + } + matches, err := manifest.MatchesDigest(sourceManifest, digested.Digest()) + if err != nil { + return nil, errors.Wrapf(err, "Error computing digest of source image's manifest") + } + if !matches { + return nil, errors.New("Digest of source image's manifest would not match destination reference") + } + } + } + + if err := checkImageDestinationForCurrentRuntimeOS(ctx, options.DestinationCtx, src, c.dest); err != nil { + return nil, err + } + + var sigs [][]byte + if options.RemoveSignatures { + sigs = [][]byte{} + } else { + c.Printf("Getting image source signatures\n") + s, err := src.Signatures(ctx) + if err != nil { + return nil, errors.Wrap(err, "Error reading signatures") + } + sigs = s + } + if len(sigs) != 0 { + c.Printf("Checking if image destination supports signatures\n") + if err := c.dest.SupportsSignatures(ctx); err != nil { + return nil, errors.Wrap(err, "Can not copy signatures") + } + } + + ic := imageCopier{ + c: c, + manifestUpdates: &types.ManifestUpdateOptions{InformationOnly: types.ManifestUpdateInformation{Destination: c.dest}}, + src: src, + // diffIDsAreNeeded is computed later + canModifyManifest: len(sigs) == 0 && !destIsDigestedReference, + } + // Ensure _this_ copy sees exactly the intended data when either processing a signed image or signing it. + // This may be too conservative, but for now, better safe than sorry, _especially_ on the SignBy path: + // The signature makes the content non-repudiable, so it very much matters that the signature is made over exactly what the user intended. + // We do intend the RecordDigestUncompressedPair calls to only work with reliable data, but at least there’s a risk + // that the compressed version coming from a third party may be designed to attack some other decompressor implementation, + // and we would reuse and sign it. + ic.canSubstituteBlobs = ic.canModifyManifest && options.SignBy == "" + + if err := ic.updateEmbeddedDockerReference(); err != nil { + return nil, err + } + + // We compute preferredManifestMIMEType only to show it in error messages. + // Without having to add this context in an error message, we would be happy enough to know only that no conversion is needed. + preferredManifestMIMEType, otherManifestMIMETypeCandidates, err := ic.determineManifestConversion(ctx, c.dest.SupportedManifestMIMETypes(), options.ForceManifestMIMEType) + if err != nil { + return nil, err + } + + // If src.UpdatedImageNeedsLayerDiffIDs(ic.manifestUpdates) will be true, it needs to be true by the time we get here. + ic.diffIDsAreNeeded = src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates) + + if err := ic.copyLayers(ctx); err != nil { + return nil, err + } + + // With docker/distribution registries we do not know whether the registry accepts schema2 or schema1 only; + // and at least with the OpenShift registry "acceptschema2" option, there is no way to detect the support + // without actually trying to upload something and getting a types.ManifestTypeRejectedError. + // So, try the preferred manifest MIME type. 
If the process succeeds, fine… + manifestBytes, err = ic.copyUpdatedConfigAndManifest(ctx) + if err != nil { + logrus.Debugf("Writing manifest using preferred type %s failed: %v", preferredManifestMIMEType, err) + // … if it fails, _and_ the failure is because the manifest is rejected, we may have other options. + if _, isManifestRejected := errors.Cause(err).(types.ManifestTypeRejectedError); !isManifestRejected || len(otherManifestMIMETypeCandidates) == 0 { + // We don’t have other options. + // In principle the code below would handle this as well, but the resulting error message is fairly ugly. + // Don’t bother the user with MIME types if we have no choice. + return nil, err + } + // If the original MIME type is acceptable, determineManifestConversion always uses it as preferredManifestMIMEType. + // So if we are here, we will definitely be trying to convert the manifest. + // With !ic.canModifyManifest, that would just be a string of repeated failures for the same reason, + // so let’s bail out early and with a better error message. + if !ic.canModifyManifest { + return nil, errors.Wrap(err, "Writing manifest failed (and converting it is not possible)") + } + + // errs is a list of errors when trying various manifest types. Also serves as an "upload succeeded" flag when set to nil. + errs := []string{fmt.Sprintf("%s(%v)", preferredManifestMIMEType, err)} + for _, manifestMIMEType := range otherManifestMIMETypeCandidates { + logrus.Debugf("Trying to use manifest type %s…", manifestMIMEType) + ic.manifestUpdates.ManifestMIMEType = manifestMIMEType + attemptedManifest, err := ic.copyUpdatedConfigAndManifest(ctx) + if err != nil { + logrus.Debugf("Upload of manifest type %s failed: %v", manifestMIMEType, err) + errs = append(errs, fmt.Sprintf("%s(%v)", manifestMIMEType, err)) + continue + } + + // We have successfully uploaded a manifest. + manifestBytes = attemptedManifest + errs = nil // Mark this as a success so that we don't abort below. + break + } + if errs != nil { + return nil, fmt.Errorf("Uploading manifest failed, attempted the following formats: %s", strings.Join(errs, ", ")) + } + } + + if options.SignBy != "" { + newSig, err := c.createSignature(manifestBytes, options.SignBy) + if err != nil { + return nil, err + } + sigs = append(sigs, newSig) + } + + c.Printf("Storing signatures\n") + if err := c.dest.PutSignatures(ctx, sigs); err != nil { + return nil, errors.Wrap(err, "Error writing signatures") + } + + return manifestBytes, nil +} + +// Printf writes a formatted string to c.reportWriter. +// Note that the method name Printf is not entirely arbitrary: (go tool vet) +// has a built-in list of functions/methods (whatever object they are for) +// which have their format strings checked; for other names we would have +// to pass a parameter to every (go tool vet) invocation. +func (c *copier) Printf(format string, a ...interface{}) { + fmt.Fprintf(c.reportWriter, format, a...) 
+} + +func checkImageDestinationForCurrentRuntimeOS(ctx context.Context, sys *types.SystemContext, src types.Image, dest types.ImageDestination) error { + if dest.MustMatchRuntimeOS() { + wantedOS := runtime.GOOS + if sys != nil && sys.OSChoice != "" { + wantedOS = sys.OSChoice + } + c, err := src.OCIConfig(ctx) + if err != nil { + return errors.Wrapf(err, "Error parsing image configuration") + } + osErr := fmt.Errorf("image operating system %q cannot be used on %q", c.OS, wantedOS) + if wantedOS == "windows" && c.OS == "linux" { + return osErr + } else if wantedOS != "windows" && c.OS == "windows" { + return osErr + } + } + return nil +} + +// updateEmbeddedDockerReference handles the Docker reference embedded in Docker schema1 manifests. +func (ic *imageCopier) updateEmbeddedDockerReference() error { + if ic.c.dest.IgnoresEmbeddedDockerReference() { + return nil // Destination would prefer us not to update the embedded reference. + } + destRef := ic.c.dest.Reference().DockerReference() + if destRef == nil { + return nil // Destination does not care about Docker references + } + if !ic.src.EmbeddedDockerReferenceConflicts(destRef) { + return nil // No reference embedded in the manifest, or it matches destRef already. + } + + if !ic.canModifyManifest { + return errors.Errorf("Copying a schema1 image with an embedded Docker reference to %s (Docker reference %s) would invalidate existing signatures. Explicitly enable signature removal to proceed anyway", + transports.ImageName(ic.c.dest.Reference()), destRef.String()) + } + ic.manifestUpdates.EmbeddedDockerReference = destRef + return nil +} + +// isTTY returns true if the io.Writer is a file and a tty. +func isTTY(w io.Writer) bool { + if f, ok := w.(*os.File); ok { + return terminal.IsTerminal(int(f.Fd())) + } + return false +} + +// copyLayers copies layers from ic.src/ic.c.rawSource to dest, using and updating ic.manifestUpdates if necessary and ic.canModifyManifest. +func (ic *imageCopier) copyLayers(ctx context.Context) error { + srcInfos := ic.src.LayerInfos() + numLayers := len(srcInfos) + updatedSrcInfos, err := ic.src.LayerInfosForCopy(ctx) + if err != nil { + return err + } + srcInfosUpdated := false + if updatedSrcInfos != nil && !reflect.DeepEqual(srcInfos, updatedSrcInfos) { + if !ic.canModifyManifest { + return errors.Errorf("Internal error: copyLayers() needs to use an updated manifest but that was known to be forbidden") + } + srcInfos = updatedSrcInfos + srcInfosUpdated = true + } + + type copyLayerData struct { + destInfo types.BlobInfo + diffID digest.Digest + err error + } + + // copyGroup is used to determine if all layers are copied + copyGroup := sync.WaitGroup{} + copyGroup.Add(numLayers) + + // copySemaphore is used to limit the number of parallel downloads to + // avoid malicious images causing troubles and to be nice to servers. + var copySemaphore *semaphore.Weighted + if ic.c.copyInParallel { + copySemaphore = semaphore.NewWeighted(int64(maxParallelDownloads)) + } else { + copySemaphore = semaphore.NewWeighted(int64(1)) + } + + data := make([]copyLayerData, numLayers) + copyLayerHelper := func(index int, srcLayer types.BlobInfo, pool *mpb.Progress) { + defer copySemaphore.Release(1) + defer copyGroup.Done() + cld := copyLayerData{} + if ic.c.dest.AcceptsForeignLayerURLs() && len(srcLayer.URLs) != 0 { + // DiffIDs are, currently, needed only when converting from schema1. + // In which case src.LayerInfos will not have URLs because schema1 + // does not support them. 
+			if ic.diffIDsAreNeeded {
+				cld.err = errors.New("getting DiffID for foreign layers is unimplemented")
+			} else {
+				cld.destInfo = srcLayer
+				logrus.Debugf("Skipping foreign layer %q copy to %s", cld.destInfo.Digest, ic.c.dest.Reference().Transport().Name())
+			}
+		} else {
+			cld.destInfo, cld.diffID, cld.err = ic.copyLayer(ctx, srcLayer, pool)
+		}
+		data[index] = cld
+	}
+
+	func() { // A scope for defer
+		progressPool, progressCleanup := ic.c.newProgressPool(ctx)
+		defer progressCleanup()
+
+		for i, srcLayer := range srcInfos {
+			copySemaphore.Acquire(ctx, 1)
+			go copyLayerHelper(i, srcLayer, progressPool)
+		}
+
+		// Wait for all layers to be copied
+		copyGroup.Wait()
+	}()
+
+	destInfos := make([]types.BlobInfo, numLayers)
+	diffIDs := make([]digest.Digest, numLayers)
+	for i, cld := range data {
+		if cld.err != nil {
+			return cld.err
+		}
+		destInfos[i] = cld.destInfo
+		diffIDs[i] = cld.diffID
+	}
+
+	ic.manifestUpdates.InformationOnly.LayerInfos = destInfos
+	if ic.diffIDsAreNeeded {
+		ic.manifestUpdates.InformationOnly.LayerDiffIDs = diffIDs
+	}
+	if srcInfosUpdated || layerDigestsDiffer(srcInfos, destInfos) {
+		ic.manifestUpdates.LayerInfos = destInfos
+	}
+	return nil
+}
+
+// layerDigestsDiffer returns true iff the digests in a and b differ (ignoring sizes and possible other fields)
+func layerDigestsDiffer(a, b []types.BlobInfo) bool {
+	if len(a) != len(b) {
+		return true
+	}
+	for i := range a {
+		if a[i].Digest != b[i].Digest {
+			return true
+		}
+	}
+	return false
+}
+
+// copyUpdatedConfigAndManifest updates the image per ic.manifestUpdates, if necessary,
+// stores the resulting config and manifest to the destination, and returns the stored manifest.
+func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context) ([]byte, error) {
+	pendingImage := ic.src
+	if !reflect.DeepEqual(*ic.manifestUpdates, types.ManifestUpdateOptions{InformationOnly: ic.manifestUpdates.InformationOnly}) {
+		if !ic.canModifyManifest {
+			return nil, errors.Errorf("Internal error: copy needs an updated manifest but that was known to be forbidden")
+		}
+		if !ic.diffIDsAreNeeded && ic.src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates) {
+			// We have set ic.diffIDsAreNeeded based on the preferred MIME type returned by determineManifestConversion.
+			// So, this can only happen if we are trying to upload using one of the other MIME type candidates.
+			// Because UpdatedImageNeedsLayerDiffIDs is true only when converting from s1 to s2, this case should only arise
+			// when ic.c.dest.SupportedManifestMIMETypes() includes both s1 and s2, the upload using s1 failed, and we are now trying s2.
+			// Supposedly s2-only registries do not exist or are extremely rare, so failing with this error message is good enough for now.
+			// If handling such registries turns out to be necessary, we could compute ic.diffIDsAreNeeded based on the full list of manifest MIME type candidates.
+			return nil, errors.Errorf("Can not convert image to %s, preparing DiffIDs for this case is not supported", ic.manifestUpdates.ManifestMIMEType)
+		}
+		pi, err := ic.src.UpdatedImage(ctx, *ic.manifestUpdates)
+		if err != nil {
+			return nil, errors.Wrap(err, "Error creating an updated image manifest")
+		}
+		pendingImage = pi
+	}
+	manifest, _, err := pendingImage.Manifest(ctx)
+	if err != nil {
+		return nil, errors.Wrap(err, "Error reading manifest")
+	}
+
+	if err := ic.c.copyConfig(ctx, pendingImage); err != nil {
+		return nil, err
+	}
+
+	ic.c.Printf("Writing manifest to image destination\n")
+	if err := ic.c.dest.PutManifest(ctx, manifest); err != nil {
+		return nil, errors.Wrap(err, "Error writing manifest")
+	}
+	return manifest, nil
+}
+
+// newProgressPool creates a *mpb.Progress and a cleanup function.
+// The caller must eventually call the returned cleanup function after the pool will no longer be updated.
+func (c *copier) newProgressPool(ctx context.Context) (*mpb.Progress, func()) {
+	ctx, cancel := context.WithCancel(ctx)
+	pool := mpb.New(mpb.WithWidth(40), mpb.WithOutput(c.progressOutput), mpb.WithContext(ctx))
+	return pool, func() {
+		cancel()
+		pool.Wait()
+	}
+}
+
+// createProgressBar creates a mpb.Bar in pool. Note that if the copier's reportWriter
+// is ioutil.Discard, the progress bar's output will be discarded
+func (c *copier) createProgressBar(pool *mpb.Progress, info types.BlobInfo, kind string, onComplete string) *mpb.Bar {
+	// shortDigestLen is the length of the digest used for blobs.
+	const shortDigestLen = 12
+
+	prefix := fmt.Sprintf("Copying %s %s", kind, info.Digest.Encoded())
+	// Truncate the prefix (chopping off some part of the digest) to make all progress bars aligned in a column.
+	maxPrefixLen := len("Copying blob ") + shortDigestLen
+	if len(prefix) > maxPrefixLen {
+		prefix = prefix[:maxPrefixLen]
+	}
+
+	bar := pool.AddBar(info.Size,
+		mpb.BarClearOnComplete(),
+		mpb.PrependDecorators(
+			decor.Name(prefix),
+		),
+		mpb.AppendDecorators(
+			decor.OnComplete(decor.CountersKibiByte("%.1f / %.1f"), " "+onComplete),
+		),
+	)
+	if c.progressOutput == ioutil.Discard {
+		c.Printf("Copying %s %s\n", kind, info.Digest)
+	}
+	return bar
+}
+
+// copyConfig copies config.json, if any, from src to dest.
+func (c *copier) copyConfig(ctx context.Context, src types.Image) error {
+	srcInfo := src.ConfigInfo()
+	if srcInfo.Digest != "" {
+		configBlob, err := src.ConfigBlob(ctx)
+		if err != nil {
+			return errors.Wrapf(err, "Error reading config blob %s", srcInfo.Digest)
+		}
+
+		destInfo, err := func() (types.BlobInfo, error) { // A scope for defer
+			progressPool, progressCleanup := c.newProgressPool(ctx)
+			defer progressCleanup()
+			bar := c.createProgressBar(progressPool, srcInfo, "config", "done")
+			destInfo, err := c.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, false, true, bar)
+			if err != nil {
+				return types.BlobInfo{}, err
+			}
+			bar.SetTotal(int64(len(configBlob)), true)
+			return destInfo, nil
+		}()
+		if err != nil {
+			return err
+		}
+		if destInfo.Digest != srcInfo.Digest {
+			return errors.Errorf("Internal error: copying uncompressed config blob %s changed digest to %s", srcInfo.Digest, destInfo.Digest)
+		}
+	}
+	return nil
+}
+
+// diffIDResult contains both a digest value and an error from diffIDComputationGoroutine.
+// We could also send the error through the pipeReader, but this more cleanly separates the copying of the layer and the DiffID computation.
+type diffIDResult struct { + digest digest.Digest + err error +} + +// copyLayer copies a layer with srcInfo (with known Digest and possibly known Size) in src to dest, perhaps compressing it if canCompress, +// and returns a complete blobInfo of the copied layer, and a value for LayerDiffIDs if diffIDIsNeeded +func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, pool *mpb.Progress) (types.BlobInfo, digest.Digest, error) { + cachedDiffID := ic.c.blobInfoCache.UncompressedDigest(srcInfo.Digest) // May be "" + diffIDIsNeeded := ic.diffIDsAreNeeded && cachedDiffID == "" + + // If we already have the blob, and we don't need to compute the diffID, then we don't need to read it from the source. + if !diffIDIsNeeded { + reused, blobInfo, err := ic.c.dest.TryReusingBlob(ctx, srcInfo, ic.c.blobInfoCache, ic.canSubstituteBlobs) + if err != nil { + return types.BlobInfo{}, "", errors.Wrapf(err, "Error trying to reuse blob %s at destination", srcInfo.Digest) + } + if reused { + logrus.Debugf("Skipping blob %s (already present):", srcInfo.Digest) + bar := ic.c.createProgressBar(pool, srcInfo, "blob", "skipped: already exists") + bar.SetTotal(0, true) + return blobInfo, cachedDiffID, nil + } + } + + // Fallback: copy the layer, computing the diffID if we need to do so + srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(ctx, srcInfo, ic.c.blobInfoCache) + if err != nil { + return types.BlobInfo{}, "", errors.Wrapf(err, "Error reading blob %s", srcInfo.Digest) + } + defer srcStream.Close() + + bar := ic.c.createProgressBar(pool, srcInfo, "blob", "done") + + blobInfo, diffIDChan, err := ic.copyLayerFromStream(ctx, srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize}, diffIDIsNeeded, bar) + if err != nil { + return types.BlobInfo{}, "", err + } + + diffID := cachedDiffID + if diffIDIsNeeded { + select { + case <-ctx.Done(): + return types.BlobInfo{}, "", ctx.Err() + case diffIDResult := <-diffIDChan: + if diffIDResult.err != nil { + return types.BlobInfo{}, "", errors.Wrap(diffIDResult.err, "Error computing layer DiffID") + } + logrus.Debugf("Computed DiffID %s for layer %s", diffIDResult.digest, srcInfo.Digest) + // This is safe because we have just computed diffIDResult.Digest ourselves, and in the process + // we have read all of the input blob, so srcInfo.Digest must have been validated by digestingReader. + ic.c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, diffIDResult.digest) + diffID = diffIDResult.digest + } + } + + bar.SetTotal(srcInfo.Size, true) + return blobInfo, diffID, nil +} + +// copyLayerFromStream is an implementation detail of copyLayer; mostly providing a separate “defer” scope. +// it copies a blob with srcInfo (with known Digest and possibly known Size) from srcStream to dest, +// perhaps compressing the stream if canCompress, +// and returns a complete blobInfo of the copied blob and perhaps a <-chan diffIDResult if diffIDIsNeeded, to be read by the caller. 
+func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo, + diffIDIsNeeded bool, bar *mpb.Bar) (types.BlobInfo, <-chan diffIDResult, error) { + var getDiffIDRecorder func(compression.DecompressorFunc) io.Writer // = nil + var diffIDChan chan diffIDResult + + err := errors.New("Internal error: unexpected panic in copyLayer") // For pipeWriter.CloseWithError below + if diffIDIsNeeded { + diffIDChan = make(chan diffIDResult, 1) // Buffered, so that sending a value after this or our caller has failed and exited does not block. + pipeReader, pipeWriter := io.Pipe() + defer func() { // Note that this is not the same as {defer pipeWriter.CloseWithError(err)}; we need err to be evaluated lazily. + pipeWriter.CloseWithError(err) // CloseWithError(nil) is equivalent to Close() + }() + + getDiffIDRecorder = func(decompressor compression.DecompressorFunc) io.Writer { + // If this fails, e.g. because we have exited and due to pipeWriter.CloseWithError() above further + // reading from the pipe has failed, we don’t really care. + // We only read from diffIDChan if the rest of the flow has succeeded, and when we do read from it, + // the return value includes an error indication, which we do check. + // + // If this gets never called, pipeReader will not be used anywhere, but pipeWriter will only be + // closed above, so we are happy enough with both pipeReader and pipeWriter to just get collected by GC. + go diffIDComputationGoroutine(diffIDChan, pipeReader, decompressor) // Closes pipeReader + return pipeWriter + } + } + blobInfo, err := ic.c.copyBlobFromStream(ctx, srcStream, srcInfo, getDiffIDRecorder, ic.canModifyManifest, false, bar) // Sets err to nil on success + return blobInfo, diffIDChan, err + // We need the defer … pipeWriter.CloseWithError() to happen HERE so that the caller can block on reading from diffIDChan +} + +// diffIDComputationGoroutine reads all input from layerStream, uncompresses using decompressor if necessary, and sends its digest, and status, if any, to dest. +func diffIDComputationGoroutine(dest chan<- diffIDResult, layerStream io.ReadCloser, decompressor compression.DecompressorFunc) { + result := diffIDResult{ + digest: "", + err: errors.New("Internal error: unexpected panic in diffIDComputationGoroutine"), + } + defer func() { dest <- result }() + defer layerStream.Close() // We do not care to bother the other end of the pipe with other failures; we send them to dest instead. + + result.digest, result.err = computeDiffID(layerStream, decompressor) +} + +// computeDiffID reads all input from layerStream, uncompresses it using decompressor if necessary, and returns its digest. +func computeDiffID(stream io.Reader, decompressor compression.DecompressorFunc) (digest.Digest, error) { + if decompressor != nil { + s, err := decompressor(stream) + if err != nil { + return "", err + } + defer s.Close() + stream = s + } + + return digest.Canonical.FromReader(stream) +} + +// copyBlobFromStream copies a blob with srcInfo (with known Digest and possibly known Size) from srcStream to dest, +// perhaps sending a copy to an io.Writer if getOriginalLayerCopyWriter != nil, +// perhaps compressing it if canCompress, +// and returns a complete blobInfo of the copied blob. 
+func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo, + getOriginalLayerCopyWriter func(decompressor compression.DecompressorFunc) io.Writer, + canModifyBlob bool, isConfig bool, bar *mpb.Bar) (types.BlobInfo, error) { + // The copying happens through a pipeline of connected io.Readers. + // === Input: srcStream + + // === Process input through digestingReader to validate against the expected digest. + // Be paranoid; in case PutBlob somehow managed to ignore an error from digestingReader, + // use a separate validation failure indicator. + // Note that for this check we don't use the stronger "validationSucceeded" indicator, because + // dest.PutBlob may detect that the layer already exists, in which case we don't + // read stream to the end, and validation does not happen. + digestingReader, err := newDigestingReader(srcStream, srcInfo.Digest) + if err != nil { + return types.BlobInfo{}, errors.Wrapf(err, "Error preparing to verify blob %s", srcInfo.Digest) + } + var destStream io.Reader = digestingReader + + // === Detect compression of the input stream. + // This requires us to “peek ahead” into the stream to read the initial part, which requires us to chain through another io.Reader returned by DetectCompression. + decompressor, destStream, err := compression.DetectCompression(destStream) // We could skip this in some cases, but let's keep the code path uniform + if err != nil { + return types.BlobInfo{}, errors.Wrapf(err, "Error reading blob %s", srcInfo.Digest) + } + isCompressed := decompressor != nil + destStream = bar.ProxyReader(destStream) + + // === Send a copy of the original, uncompressed, stream, to a separate path if necessary. + var originalLayerReader io.Reader // DO NOT USE this other than to drain the input if no other consumer in the pipeline has done so. + if getOriginalLayerCopyWriter != nil { + destStream = io.TeeReader(destStream, getOriginalLayerCopyWriter(decompressor)) + originalLayerReader = destStream + } + + // === Deal with layer compression/decompression if necessary + var inputInfo types.BlobInfo + var compressionOperation types.LayerCompression + if canModifyBlob && c.dest.DesiredLayerCompression() == types.Compress && !isCompressed { + logrus.Debugf("Compressing blob on the fly") + compressionOperation = types.Compress + pipeReader, pipeWriter := io.Pipe() + defer pipeReader.Close() + + // If this fails while writing data, it will do pipeWriter.CloseWithError(); if it fails otherwise, + // e.g. because we have exited and due to pipeReader.Close() above further writing to the pipe has failed, + // we don’t care. + go compressGoroutine(pipeWriter, destStream) // Closes pipeWriter + destStream = pipeReader + inputInfo.Digest = "" + inputInfo.Size = -1 + } else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Decompress && isCompressed { + logrus.Debugf("Blob will be decompressed") + compressionOperation = types.Decompress + s, err := decompressor(destStream) + if err != nil { + return types.BlobInfo{}, err + } + defer s.Close() + destStream = s + inputInfo.Digest = "" + inputInfo.Size = -1 + } else { + logrus.Debugf("Using original blob without modification") + compressionOperation = types.PreserveOriginal + inputInfo = srcInfo + } + + // === Report progress using the c.progress channel, if required. 
+	if c.progress != nil && c.progressInterval > 0 {
+		destStream = &progressReader{
+			source:   destStream,
+			channel:  c.progress,
+			interval: c.progressInterval,
+			artifact: srcInfo,
+			lastTime: time.Now(),
+		}
+	}
+
+	// === Finally, send the layer stream to dest.
+	uploadedInfo, err := c.dest.PutBlob(ctx, destStream, inputInfo, c.blobInfoCache, isConfig)
+	if err != nil {
+		return types.BlobInfo{}, errors.Wrap(err, "Error writing blob")
+	}
+
+	// This is fairly horrible: the writer from getOriginalLayerCopyWriter wants to consume
+	// all of the input (to compute DiffIDs), even if dest.PutBlob does not need it.
+	// So, read everything from originalLayerReader, which will cause the rest to be
+	// sent there if we are not already at EOF.
+	if getOriginalLayerCopyWriter != nil {
+		logrus.Debugf("Consuming rest of the original blob to satisfy getOriginalLayerCopyWriter")
+		_, err := io.Copy(ioutil.Discard, originalLayerReader)
+		if err != nil {
+			return types.BlobInfo{}, errors.Wrapf(err, "Error reading input blob %s", srcInfo.Digest)
+		}
+	}
+
+	if digestingReader.validationFailed { // Coverage: This should never happen.
+		return types.BlobInfo{}, errors.Errorf("Internal error writing blob %s, digest verification failed but was ignored", srcInfo.Digest)
+	}
+	if inputInfo.Digest != "" && uploadedInfo.Digest != inputInfo.Digest {
+		return types.BlobInfo{}, errors.Errorf("Internal error writing blob %s, blob with digest %s saved with digest %s", srcInfo.Digest, inputInfo.Digest, uploadedInfo.Digest)
+	}
+	if digestingReader.validationSucceeded {
+		// If compressionOperation != types.PreserveOriginal, we now have two reliable digest values:
+		// srcInfo.Digest describes the pre-compressionOperation input, verified by digestingReader
+		// uploadedInfo.Digest describes the post-compressionOperation output, computed by PutBlob
+		// (because inputInfo.Digest == "", this must have been computed afresh).
+		switch compressionOperation {
+		case types.PreserveOriginal:
+			break // Do nothing, we have only one digest and we might not have even verified it.
+		case types.Compress:
+			c.blobInfoCache.RecordDigestUncompressedPair(uploadedInfo.Digest, srcInfo.Digest)
+		case types.Decompress:
+			c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, uploadedInfo.Digest)
+		default:
+			return types.BlobInfo{}, errors.Errorf("Internal error: Unexpected compressionOperation value %#v", compressionOperation)
+		}
+	}
+	return uploadedInfo, nil
+}
+
+// compressGoroutine reads all input from src and writes its compressed equivalent to dest.
+func compressGoroutine(dest *io.PipeWriter, src io.Reader) {
+	err := errors.New("Internal error: unexpected panic in compressGoroutine")
+	defer func() { // Note that this is not the same as {defer dest.CloseWithError(err)}; we need err to be evaluated lazily.
+		dest.CloseWithError(err) // CloseWithError(nil) is equivalent to Close()
+	}()
+
+	zipper := pgzip.NewWriter(dest)
+	defer zipper.Close()
+
+	_, err = io.Copy(zipper, src) // Sets err to nil, i.e.
causes dest.Close() +} diff --git a/vendor/github.com/containers/image/copy/manifest.go b/vendor/github.com/containers/image/copy/manifest.go new file mode 100644 index 0000000..e8cc8a9 --- /dev/null +++ b/vendor/github.com/containers/image/copy/manifest.go @@ -0,0 +1,121 @@ +package copy + +import ( + "context" + "strings" + + "github.com/containers/image/manifest" + "github.com/containers/image/types" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// preferredManifestMIMETypes lists manifest MIME types in order of our preference, if we can't use the original manifest and need to convert. +// Prefer v2s2 to v2s1 because v2s2 does not need to be changed when uploading to a different location. +// Include v2s1 signed but not v2s1 unsigned, because docker/distribution requires a signature even if the unsigned MIME type is used. +var preferredManifestMIMETypes = []string{manifest.DockerV2Schema2MediaType, manifest.DockerV2Schema1SignedMediaType} + +// orderedSet is a list of strings (MIME types in our case), with each string appearing at most once. +type orderedSet struct { + list []string + included map[string]struct{} +} + +// newOrderedSet creates a correctly initialized orderedSet. +// [Sometimes it would be really nice if Golang had constructors…] +func newOrderedSet() *orderedSet { + return &orderedSet{ + list: []string{}, + included: map[string]struct{}{}, + } +} + +// append adds s to the end of os, only if it is not included already. +func (os *orderedSet) append(s string) { + if _, ok := os.included[s]; !ok { + os.list = append(os.list, s) + os.included[s] = struct{}{} + } +} + +// determineManifestConversion updates ic.manifestUpdates to convert manifest to a supported MIME type, if necessary and ic.canModifyManifest. +// Note that the conversion will only happen later, through ic.src.UpdatedImage +// Returns the preferred manifest MIME type (whether we are converting to it or using it unmodified), +// and a list of other possible alternatives, in order. +func (ic *imageCopier) determineManifestConversion(ctx context.Context, destSupportedManifestMIMETypes []string, forceManifestMIMEType string) (string, []string, error) { + _, srcType, err := ic.src.Manifest(ctx) + if err != nil { // This should have been cached?! + return "", nil, errors.Wrap(err, "Error reading manifest") + } + normalizedSrcType := manifest.NormalizedMIMEType(srcType) + if srcType != normalizedSrcType { + logrus.Debugf("Source manifest MIME type %s, treating it as %s", srcType, normalizedSrcType) + srcType = normalizedSrcType + } + + if forceManifestMIMEType != "" { + destSupportedManifestMIMETypes = []string{forceManifestMIMEType} + } + + if len(destSupportedManifestMIMETypes) == 0 { + return srcType, []string{}, nil // Anything goes; just use the original as is, do not try any conversions. + } + supportedByDest := map[string]struct{}{} + for _, t := range destSupportedManifestMIMETypes { + supportedByDest[t] = struct{}{} + } + + // destSupportedManifestMIMETypes is a static guess; a particular registry may still only support a subset of the types. + // So, build a list of types to try in order of decreasing preference. + // FIXME? This treats manifest.DockerV2Schema1SignedMediaType and manifest.DockerV2Schema1MediaType as distinct, + // although we are not really making any conversion, and it is very unlikely that a destination would support one but not the other. 
+ // In practice, schema1 is probably the lowest common denominator, so we would expect to try the first one of the MIME types + // and never attempt the other one. + prioritizedTypes := newOrderedSet() + + // First of all, prefer to keep the original manifest unmodified. + if _, ok := supportedByDest[srcType]; ok { + prioritizedTypes.append(srcType) + } + if !ic.canModifyManifest { + // We could also drop the !ic.canModifyManifest check and have the caller + // make the choice; it is already doing that to an extent, to improve error + // messages. But it is nice to hide the “if !ic.canModifyManifest, do no conversion” + // special case in here; the caller can then worry (or not) only about a good UI. + logrus.Debugf("We can't modify the manifest, hoping for the best...") + return srcType, []string{}, nil // Take our chances - FIXME? Or should we fail without trying? + } + + // Then use our list of preferred types. + for _, t := range preferredManifestMIMETypes { + if _, ok := supportedByDest[t]; ok { + prioritizedTypes.append(t) + } + } + + // Finally, try anything else the destination supports. + for _, t := range destSupportedManifestMIMETypes { + prioritizedTypes.append(t) + } + + logrus.Debugf("Manifest has MIME type %s, ordered candidate list [%s]", srcType, strings.Join(prioritizedTypes.list, ", ")) + if len(prioritizedTypes.list) == 0 { // Coverage: destSupportedManifestMIMETypes is not empty (or we would have exited in the “Anything goes” case above), so this should never happen. + return "", nil, errors.New("Internal error: no candidate MIME types") + } + preferredType := prioritizedTypes.list[0] + if preferredType != srcType { + ic.manifestUpdates.ManifestMIMEType = preferredType + } else { + logrus.Debugf("... will first try using the original manifest unmodified") + } + return preferredType, prioritizedTypes.list[1:], nil +} + +// isMultiImage returns true if img is a list of images +func isMultiImage(ctx context.Context, img types.UnparsedImage) (bool, error) { + _, mt, err := img.Manifest(ctx) + if err != nil { + return false, err + } + return manifest.MIMETypeIsMultiImage(mt), nil +} diff --git a/vendor/github.com/containers/image/copy/progress_reader.go b/vendor/github.com/containers/image/copy/progress_reader.go new file mode 100644 index 0000000..b670ee5 --- /dev/null +++ b/vendor/github.com/containers/image/copy/progress_reader.go @@ -0,0 +1,28 @@ +package copy + +import ( + "io" + "time" + + "github.com/containers/image/types" +) + +// progressReader is a reader that reports its progress on an interval. +type progressReader struct { + source io.Reader + channel chan types.ProgressProperties + interval time.Duration + artifact types.BlobInfo + lastTime time.Time + offset uint64 +} + +func (r *progressReader) Read(p []byte) (int, error) { + n, err := r.source.Read(p) + r.offset += uint64(n) + if time.Since(r.lastTime) > r.interval { + r.channel <- types.ProgressProperties{Artifact: r.artifact, Offset: r.offset} + r.lastTime = time.Now() + } + return n, err +} diff --git a/vendor/github.com/containers/image/copy/sign.go b/vendor/github.com/containers/image/copy/sign.go new file mode 100644 index 0000000..91394d2 --- /dev/null +++ b/vendor/github.com/containers/image/copy/sign.go @@ -0,0 +1,31 @@ +package copy + +import ( + "github.com/containers/image/signature" + "github.com/containers/image/transports" + "github.com/pkg/errors" +) + +// createSignature creates a new signature of manifest using keyIdentity. 
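+//
+// A rough usage sketch (the manifest bytes and key identity here are illustrative):
+//
+//	sig, err := c.createSignature(manifestBytes, "security@example.com")
+//	if err == nil {
+//		// sig is then stored alongside the image, e.g. via ImageDestination.PutSignatures.
+//	}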
+func (c *copier) createSignature(manifest []byte, keyIdentity string) ([]byte, error) { + mech, err := signature.NewGPGSigningMechanism() + if err != nil { + return nil, errors.Wrap(err, "Error initializing GPG") + } + defer mech.Close() + if err := mech.SupportsSigning(); err != nil { + return nil, errors.Wrap(err, "Signing not supported") + } + + dockerReference := c.dest.Reference().DockerReference() + if dockerReference == nil { + return nil, errors.Errorf("Cannot determine canonical Docker reference for destination %s", transports.ImageName(c.dest.Reference())) + } + + c.Printf("Signing manifest\n") + newSig, err := signature.SignDockerManifest(manifest, dockerReference.String(), mech, keyIdentity) + if err != nil { + return nil, errors.Wrap(err, "Error creating signature") + } + return newSig, nil +} diff --git a/vendor/github.com/containers/image/directory/directory_dest.go b/vendor/github.com/containers/image/directory/directory_dest.go new file mode 100644 index 0000000..4b2ab02 --- /dev/null +++ b/vendor/github.com/containers/image/directory/directory_dest.go @@ -0,0 +1,260 @@ +package directory + +import ( + "context" + "io" + "io/ioutil" + "os" + "path/filepath" + + "github.com/containers/image/types" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +const version = "Directory Transport Version: 1.1\n" + +// ErrNotContainerImageDir indicates that the directory doesn't match the expected contents of a directory created +// using the 'dir' transport +var ErrNotContainerImageDir = errors.New("not a containers image directory, don't want to overwrite important data") + +type dirImageDestination struct { + ref dirReference + compress bool +} + +// newImageDestination returns an ImageDestination for writing to a directory. 
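+//
+// The resulting on-disk layout, as implied by the *Path helpers in directory_transport.go, is:
+//
+//	<dir>/version        transport version marker
+//	<dir>/manifest.json  the image manifest
+//	<dir>/<hex-digest>   one file per config/layer blob
+//	<dir>/signature-<N>  detached signatures, numbered from 1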
+func newImageDestination(ref dirReference, compress bool) (types.ImageDestination, error) {
+	d := &dirImageDestination{ref: ref, compress: compress}
+
+	// If the directory exists, check whether it is empty.
+	// If it is not empty, check whether the contents match those of a container image directory, and overwrite the contents.
+	// If the contents don't match, throw an error.
+	dirExists, err := pathExists(d.ref.resolvedPath)
+	if err != nil {
+		return nil, errors.Wrapf(err, "error checking for path %q", d.ref.resolvedPath)
+	}
+	if dirExists {
+		isEmpty, err := isDirEmpty(d.ref.resolvedPath)
+		if err != nil {
+			return nil, err
+		}
+
+		if !isEmpty {
+			versionExists, err := pathExists(d.ref.versionPath())
+			if err != nil {
+				return nil, errors.Wrapf(err, "error checking if path exists %q", d.ref.versionPath())
+			}
+			if versionExists {
+				contents, err := ioutil.ReadFile(d.ref.versionPath())
+				if err != nil {
+					return nil, err
+				}
+				// check if the contents of the version file are what we expect them to be
+				if string(contents) != version {
+					return nil, ErrNotContainerImageDir
+				}
+			} else {
+				return nil, ErrNotContainerImageDir
+			}
+			// delete directory contents so that only one image is in the directory at a time
+			if err = removeDirContents(d.ref.resolvedPath); err != nil {
+				return nil, errors.Wrapf(err, "error erasing contents in %q", d.ref.resolvedPath)
+			}
+			logrus.Debugf("overwriting existing container image directory %q", d.ref.resolvedPath)
+		}
+	} else {
+		// create directory if it doesn't exist
+		if err := os.MkdirAll(d.ref.resolvedPath, 0755); err != nil {
+			return nil, errors.Wrapf(err, "unable to create directory %q", d.ref.resolvedPath)
+		}
+	}
+	// create version file
+	err = ioutil.WriteFile(d.ref.versionPath(), []byte(version), 0644)
+	if err != nil {
+		return nil, errors.Wrapf(err, "error creating version file %q", d.ref.versionPath())
+	}
+	return d, nil
+}
+
+// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
+// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
+func (d *dirImageDestination) Reference() types.ImageReference {
+	return d.ref
+}
+
+// Close removes resources associated with an initialized ImageDestination, if any.
+func (d *dirImageDestination) Close() error {
+	return nil
+}
+
+func (d *dirImageDestination) SupportedManifestMIMETypes() []string {
+	return nil
+}
+
+// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
+// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
+func (d *dirImageDestination) SupportsSignatures(ctx context.Context) error {
+	return nil
+}
+
+func (d *dirImageDestination) DesiredLayerCompression() types.LayerCompression {
+	if d.compress {
+		return types.Compress
+	}
+	return types.PreserveOriginal
+}
+
+// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
+// uploaded to the image destination, true otherwise.
+func (d *dirImageDestination) AcceptsForeignLayerURLs() bool {
+	return false
+}
+
+// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise.
+func (d *dirImageDestination) MustMatchRuntimeOS() bool {
+	return false
+}
+
+// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
+// and would prefer to receive an unmodified manifest instead of one modified for the destination.
+// Does not make a difference if Reference().DockerReference() is nil.
+func (d *dirImageDestination) IgnoresEmbeddedDockerReference() bool {
+	return false // N/A, DockerReference() returns nil.
+}
+
+// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
+func (d *dirImageDestination) HasThreadSafePutBlob() bool {
+	return false
+}
+
+// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
+// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
+// inputInfo.Size is the expected length of stream, if known.
+// May update cache.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+func (d *dirImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
+	blobFile, err := ioutil.TempFile(d.ref.path, "dir-put-blob")
+	if err != nil {
+		return types.BlobInfo{}, err
+	}
+	succeeded := false
+	defer func() {
+		blobFile.Close()
+		if !succeeded {
+			os.Remove(blobFile.Name())
+		}
+	}()
+
+	digester := digest.Canonical.Digester()
+	tee := io.TeeReader(stream, digester.Hash())
+
+	// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
+	size, err := io.Copy(blobFile, tee)
+	if err != nil {
+		return types.BlobInfo{}, err
+	}
+	computedDigest := digester.Digest()
+	if inputInfo.Size != -1 && size != inputInfo.Size {
+		return types.BlobInfo{}, errors.Errorf("Size mismatch when copying %s, expected %d, got %d", computedDigest, inputInfo.Size, size)
+	}
+	if err := blobFile.Sync(); err != nil {
+		return types.BlobInfo{}, err
+	}
+	if err := blobFile.Chmod(0644); err != nil {
+		return types.BlobInfo{}, err
+	}
+	blobPath := d.ref.layerPath(computedDigest)
+	if err := os.Rename(blobFile.Name(), blobPath); err != nil {
+		return types.BlobInfo{}, err
+	}
+	succeeded = true
+	return types.BlobInfo{Digest: computedDigest, Size: size}, nil
+}
+
+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
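+//
+// An illustrative call (the digest value is hypothetical):
+//
+//	reused, info, err := d.TryReusingBlob(ctx, types.BlobInfo{Digest: dgst}, cache, false)
+//	if err == nil && reused {
+//		// a blob with info.Digest/info.Size already exists in the directory; PutBlob can be skipped
+//	}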
+func (d *dirImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
+	if info.Digest == "" {
+		return false, types.BlobInfo{}, errors.Errorf("Can not check for a blob with unknown digest")
+	}
+	blobPath := d.ref.layerPath(info.Digest)
+	finfo, err := os.Stat(blobPath)
+	if err != nil && os.IsNotExist(err) {
+		return false, types.BlobInfo{}, nil
+	}
+	if err != nil {
+		return false, types.BlobInfo{}, err
+	}
+	return true, types.BlobInfo{Digest: info.Digest, Size: finfo.Size()}, nil
+
+}
+
+// PutManifest writes manifest to the destination.
+// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
+// If the destination is in principle available but refuses this manifest type (e.g. it does not recognize the schema),
+// and may accept a different manifest type instead, the returned error must be a ManifestTypeRejectedError.
+func (d *dirImageDestination) PutManifest(ctx context.Context, manifest []byte) error {
+	return ioutil.WriteFile(d.ref.manifestPath(), manifest, 0644)
+}
+
+func (d *dirImageDestination) PutSignatures(ctx context.Context, signatures [][]byte) error {
+	for i, sig := range signatures {
+		if err := ioutil.WriteFile(d.ref.signaturePath(i), sig, 0644); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Commit marks the process of storing the image as successful and asks for the image to be persisted.
+// WARNING: This does not have any transactional semantics:
+// - Uploaded data MAY be visible to others before Commit() is called
+// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
+func (d *dirImageDestination) Commit(ctx context.Context) error {
+	return nil
+}
+
+// returns true if path exists
+func pathExists(path string) (bool, error) {
+	_, err := os.Stat(path)
+	if err == nil {
+		return true, nil
+	}
+	if err != nil && os.IsNotExist(err) {
+		return false, nil
+	}
+	return false, err
+}
+
+// returns true if directory is empty
+func isDirEmpty(path string) (bool, error) {
+	files, err := ioutil.ReadDir(path)
+	if err != nil {
+		return false, err
+	}
+	return len(files) == 0, nil
+}
+
+// deletes the contents of a directory
+func removeDirContents(path string) error {
+	files, err := ioutil.ReadDir(path)
+	if err != nil {
+		return err
+	}
+
+	for _, file := range files {
+		if err := os.RemoveAll(filepath.Join(path, file.Name())); err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/containers/image/directory/directory_src.go b/vendor/github.com/containers/image/directory/directory_src.go
new file mode 100644
index 0000000..59b6253
--- /dev/null
+++ b/vendor/github.com/containers/image/directory/directory_src.go
@@ -0,0 +1,96 @@
+package directory
+
+import (
+	"context"
+	"io"
+	"io/ioutil"
+	"os"
+
+	"github.com/containers/image/manifest"
+	"github.com/containers/image/types"
+	"github.com/opencontainers/go-digest"
+	"github.com/pkg/errors"
+)
+
+type dirImageSource struct {
+	ref dirReference
+}
+
+// newImageSource returns an ImageSource reading from an existing directory.
+// The caller must call .Close() on the returned ImageSource.
+func newImageSource(ref dirReference) types.ImageSource {
+	return &dirImageSource{ref}
+}
+
+// Reference returns the reference used to set up this source, _as specified by the user_
+// (not as the image itself, or its underlying storage, claims). This can be used e.g.
to determine which public keys are trusted for this image.
+func (s *dirImageSource) Reference() types.ImageReference {
+	return s.ref
+}
+
+// Close removes resources associated with an initialized ImageSource, if any.
+func (s *dirImageSource) Close() error {
+	return nil
+}
+
+// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
+// It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
+// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
+func (s *dirImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
+	if instanceDigest != nil {
+		return nil, "", errors.Errorf(`Getting target manifest not supported by "dir:"`)
+	}
+	m, err := ioutil.ReadFile(s.ref.manifestPath())
+	if err != nil {
+		return nil, "", err
+	}
+	return m, manifest.GuessMIMEType(m), err
+}
+
+// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
+func (s *dirImageSource) HasThreadSafeGetBlob() bool {
+	return false
+}
+
+// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (s *dirImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
+	r, err := os.Open(s.ref.layerPath(info.Digest))
+	if err != nil {
+		return nil, -1, err
+	}
+	fi, err := r.Stat()
+	if err != nil {
+		return nil, -1, err
+	}
+	return r, fi.Size(), nil
+}
+
+// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+func (s *dirImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
+	if instanceDigest != nil {
+		return nil, errors.Errorf(`Manifest lists are not supported by "dir:"`)
+	}
+	signatures := [][]byte{}
+	for i := 0; ; i++ {
+		signature, err := ioutil.ReadFile(s.ref.signaturePath(i))
+		if err != nil {
+			if os.IsNotExist(err) {
+				break
+			}
+			return nil, err
+		}
+		signatures = append(signatures, signature)
+	}
+	return signatures, nil
+}
+
+// LayerInfosForCopy() returns updated layer info that should be used when copying, in preference to values in the manifest, if specified.
+func (s *dirImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) { + return nil, nil +} diff --git a/vendor/github.com/containers/image/directory/directory_transport.go b/vendor/github.com/containers/image/directory/directory_transport.go new file mode 100644 index 0000000..66b9e72 --- /dev/null +++ b/vendor/github.com/containers/image/directory/directory_transport.go @@ -0,0 +1,187 @@ +package directory + +import ( + "context" + "fmt" + "path/filepath" + "strings" + + "github.com/containers/image/directory/explicitfilepath" + "github.com/containers/image/docker/reference" + "github.com/containers/image/image" + "github.com/containers/image/transports" + "github.com/containers/image/types" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +func init() { + transports.Register(Transport) +} + +// Transport is an ImageTransport for directory paths. +var Transport = dirTransport{} + +type dirTransport struct{} + +func (t dirTransport) Name() string { + return "dir" +} + +// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. +func (t dirTransport) ParseReference(reference string) (types.ImageReference, error) { + return NewReference(reference) +} + +// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys +// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). +// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion. +// scope passed to this function will not be "", that value is always allowed. +func (t dirTransport) ValidatePolicyConfigurationScope(scope string) error { + if !strings.HasPrefix(scope, "/") { + return errors.Errorf("Invalid scope %s: Must be an absolute path", scope) + } + // Refuse also "/", otherwise "/" and "" would have the same semantics, + // and "" could be unexpectedly shadowed by the "/" entry. + if scope == "/" { + return errors.New(`Invalid scope "/": Use the generic default scope ""`) + } + cleaned := filepath.Clean(scope) + if cleaned != scope { + return errors.Errorf(`Invalid scope %s: Uses non-canonical format, perhaps try %s`, scope, cleaned) + } + return nil +} + +// dirReference is an ImageReference for directory paths. +type dirReference struct { + // Note that the interpretation of paths below depends on the underlying filesystem state, which may change under us at any time! + // Either of the paths may point to a different, or no, inode over time. resolvedPath may contain symbolic links, and so on. + + // Generally we follow the intent of the user, and use the "path" member for filesystem operations (e.g. the user can use a relative path to avoid + // being exposed to symlinks and renames in the parent directories to the working directory). + // (But in general, we make no attempt to be completely safe against concurrent hostile filesystem modifications.) + path string // As specified by the user. May be relative, contain symlinks, etc. + resolvedPath string // Absolute path with no symlinks, at least at the time of its creation. Primarily used for policy namespaces. +} + +// There is no directory.ParseReference because it is rather pointless. +// Callers who need a transport-independent interface will go through +// dirTransport.ParseReference; callers who intentionally deal with directories +// can use directory.NewReference. 
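+//
+// An illustrative use of either entry point (the directory path is hypothetical):
+//
+//	ref, err := directory.NewReference("/var/tmp/my-image")  // direct
+//	ref, err = Transport.ParseReference("/var/tmp/my-image") // via the transport registry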
+ +// NewReference returns a directory reference for a specified path. +// +// We do not expose an API supplying the resolvedPath; we could, but recomputing it +// is generally cheap enough that we prefer being confident about the properties of resolvedPath. +func NewReference(path string) (types.ImageReference, error) { + resolved, err := explicitfilepath.ResolvePathToFullyExplicit(path) + if err != nil { + return nil, err + } + return dirReference{path: path, resolvedPath: resolved}, nil +} + +func (ref dirReference) Transport() types.ImageTransport { + return Transport +} + +// StringWithinTransport returns a string representation of the reference, which MUST be such that +// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. +// NOTE: The returned string is not promised to be equal to the original input to ParseReference; +// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. +// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix. +func (ref dirReference) StringWithinTransport() string { + return ref.path +} + +// DockerReference returns a Docker reference associated with this reference +// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, +// not e.g. after redirect or alias processing), or nil if unknown/not applicable. +func (ref dirReference) DockerReference() reference.Named { + return nil +} + +// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. +// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases; +// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical +// (i.e. various references with exactly the same semantics should return the same configuration identity) +// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but +// not required/guaranteed that it will be a valid input to Transport().ParseReference(). +// Returns "" if configuration identities for these references are not supported. +func (ref dirReference) PolicyConfigurationIdentity() string { + return ref.resolvedPath +} + +// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search +// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed +// in order, terminating on first match, and an implicit "" is always checked at the end. +// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), +// and each following element to be a prefix of the element preceding it. +func (ref dirReference) PolicyConfigurationNamespaces() []string { + res := []string{} + path := ref.resolvedPath + for { + lastSlash := strings.LastIndex(path, "/") + if lastSlash == -1 || lastSlash == 0 { + break + } + path = path[:lastSlash] + res = append(res, path) + } + // Note that we do not include "/"; it is redundant with the default "" global default, + // and rejected by dirTransport.ValidatePolicyConfigurationScope above. + return res +} + +// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. +// The caller must call .Close() on the returned ImageCloser. 
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
+// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
+func (ref dirReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
+	src := newImageSource(ref)
+	return image.FromSource(ctx, sys, src)
+}
+
+// NewImageSource returns a types.ImageSource for this reference.
+// The caller must call .Close() on the returned ImageSource.
+func (ref dirReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
+	return newImageSource(ref), nil
+}
+
+// NewImageDestination returns a types.ImageDestination for this reference.
+// The caller must call .Close() on the returned ImageDestination.
+func (ref dirReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
+	compress := false
+	if sys != nil {
+		compress = sys.DirForceCompress
+	}
+	return newImageDestination(ref, compress)
+}
+
+// DeleteImage deletes the named image from the registry, if supported.
+func (ref dirReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
+	return errors.Errorf("Deleting images not implemented for dir: images")
+}
+
+// manifestPath returns a path for the manifest within a directory using our conventions.
+func (ref dirReference) manifestPath() string {
+	return filepath.Join(ref.path, "manifest.json")
+}
+
+// layerPath returns a path for a layer tarball within a directory using our conventions.
+func (ref dirReference) layerPath(digest digest.Digest) string {
+	// FIXME: Should we keep the digest identification?
+	return filepath.Join(ref.path, digest.Hex())
+}
+
+// signaturePath returns a path for a signature within a directory using our conventions.
+func (ref dirReference) signaturePath(index int) string {
+	return filepath.Join(ref.path, fmt.Sprintf("signature-%d", index+1))
+}
+
+// versionPath returns a path for the version file within a directory using our conventions.
+func (ref dirReference) versionPath() string {
+	return filepath.Join(ref.path, "version")
+}
diff --git a/vendor/github.com/containers/image/directory/explicitfilepath/path.go b/vendor/github.com/containers/image/directory/explicitfilepath/path.go
new file mode 100644
index 0000000..71136b8
--- /dev/null
+++ b/vendor/github.com/containers/image/directory/explicitfilepath/path.go
@@ -0,0 +1,56 @@
+package explicitfilepath
+
+import (
+	"os"
+	"path/filepath"
+
+	"github.com/pkg/errors"
+)
+
+// ResolvePathToFullyExplicit returns the input path converted to an absolute, no-symlinks, cleaned up path.
+// To do so, all elements of the input path must exist; as a special case, the final component may be
+// a non-existent name (but not a symlink pointing to a non-existent name).
+// This is intended as a helper for implementations of types.ImageReference.PolicyConfigurationIdentity etc.
+func ResolvePathToFullyExplicit(path string) (string, error) {
+	switch _, err := os.Lstat(path); {
+	case err == nil:
+		return resolveExistingPathToFullyExplicit(path)
+	case os.IsNotExist(err):
+		parent, file := filepath.Split(path)
+		resolvedParent, err := resolveExistingPathToFullyExplicit(parent)
+		if err != nil {
+			return "", err
+		}
+		if file == "." || file == ".."
{ + // Coverage: This can happen, but very rarely: if we have successfully resolved the parent, both "." and ".." in it should have been resolved as well. + // This can still happen if there is a filesystem race condition, causing the Lstat() above to fail but the later resolution to succeed. + // We do not care to promise anything if such filesystem race conditions can happen, but we definitely don't want to return "."/".." components + // in the resulting path, and especially not at the end. + return "", errors.Errorf("Unexpectedly missing special filename component in %s", path) + } + resolvedPath := filepath.Join(resolvedParent, file) + // As a sanity check, ensure that there are no "." or ".." components. + cleanedResolvedPath := filepath.Clean(resolvedPath) + if cleanedResolvedPath != resolvedPath { + // Coverage: This should never happen. + return "", errors.Errorf("Internal inconsistency: Path %s resolved to %s still cleaned up to %s", path, resolvedPath, cleanedResolvedPath) + } + return resolvedPath, nil + default: // err != nil, unrecognized + return "", err + } +} + +// resolveExistingPathToFullyExplicit is the same as ResolvePathToFullyExplicit, +// but without the special case for missing final component. +func resolveExistingPathToFullyExplicit(path string) (string, error) { + resolved, err := filepath.Abs(path) + if err != nil { + return "", err // Coverage: This can fail only if os.Getwd() fails. + } + resolved, err = filepath.EvalSymlinks(resolved) + if err != nil { + return "", err + } + return filepath.Clean(resolved), nil +} diff --git a/vendor/github.com/containers/image/docker/archive/dest.go b/vendor/github.com/containers/image/docker/archive/dest.go new file mode 100644 index 0000000..c88aea3 --- /dev/null +++ b/vendor/github.com/containers/image/docker/archive/dest.go @@ -0,0 +1,72 @@ +package archive + +import ( + "context" + "io" + "os" + + "github.com/containers/image/docker/tarfile" + "github.com/containers/image/types" + "github.com/pkg/errors" +) + +type archiveImageDestination struct { + *tarfile.Destination // Implements most of types.ImageDestination + ref archiveReference + writer io.Closer +} + +func newImageDestination(sys *types.SystemContext, ref archiveReference) (types.ImageDestination, error) { + // ref.path can be either a pipe or a regular file + // in the case of a pipe, we require that we can open it for write + // in the case of a regular file, we don't want to overwrite any pre-existing file + // so we check for Size() == 0 below (This is racy, but using O_EXCL would also be racy, + // only in a different way. Either way, it’s up to the user to not have two writers to the same path.) 
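+	// (An illustrative reference form this destination serves: "docker-archive:/tmp/img.tar:busybox:latest"
+	// creates /tmp/img.tar if absent, while a pre-created named pipe (FIFO) at the same path is simply
+	// streamed to; a non-empty regular file is rejected below.)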
+ fh, err := os.OpenFile(ref.path, os.O_WRONLY|os.O_CREATE, 0644) + if err != nil { + return nil, errors.Wrapf(err, "error opening file %q", ref.path) + } + + fhStat, err := fh.Stat() + if err != nil { + return nil, errors.Wrapf(err, "error statting file %q", ref.path) + } + + if fhStat.Mode().IsRegular() && fhStat.Size() != 0 { + return nil, errors.New("docker-archive doesn't support modifying existing images") + } + + tarDest := tarfile.NewDestination(fh, ref.destinationRef) + if sys != nil && sys.DockerArchiveAdditionalTags != nil { + tarDest.AddRepoTags(sys.DockerArchiveAdditionalTags) + } + return &archiveImageDestination{ + Destination: tarDest, + ref: ref, + writer: fh, + }, nil +} + +// DesiredLayerCompression indicates if layers must be compressed, decompressed or preserved +func (d *archiveImageDestination) DesiredLayerCompression() types.LayerCompression { + return types.Decompress +} + +// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, +// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects. +func (d *archiveImageDestination) Reference() types.ImageReference { + return d.ref +} + +// Close removes resources associated with an initialized ImageDestination, if any. +func (d *archiveImageDestination) Close() error { + return d.writer.Close() +} + +// Commit marks the process of storing the image as successful and asks for the image to be persisted. +// WARNING: This does not have any transactional semantics: +// - Uploaded data MAY be visible to others before Commit() is called +// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed) +func (d *archiveImageDestination) Commit(ctx context.Context) error { + return d.Destination.Commit(ctx) +} diff --git a/vendor/github.com/containers/image/docker/archive/src.go b/vendor/github.com/containers/image/docker/archive/src.go new file mode 100644 index 0000000..e46c9db --- /dev/null +++ b/vendor/github.com/containers/image/docker/archive/src.go @@ -0,0 +1,40 @@ +package archive + +import ( + "context" + "github.com/containers/image/docker/tarfile" + "github.com/containers/image/types" + "github.com/sirupsen/logrus" +) + +type archiveImageSource struct { + *tarfile.Source // Implements most of types.ImageSource + ref archiveReference +} + +// newImageSource returns a types.ImageSource for the specified image reference. +// The caller must call .Close() on the returned ImageSource. +func newImageSource(ctx context.Context, ref archiveReference) (types.ImageSource, error) { + if ref.destinationRef != nil { + logrus.Warnf("docker-archive: references are not supported for sources (ignoring)") + } + src, err := tarfile.NewSourceFromFile(ref.path) + if err != nil { + return nil, err + } + return &archiveImageSource{ + Source: src, + ref: ref, + }, nil +} + +// Reference returns the reference used to set up this source, _as specified by the user_ +// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. +func (s *archiveImageSource) Reference() types.ImageReference { + return s.ref +} + +// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified. 
+func (s *archiveImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) {
+	return nil, nil
+}
diff --git a/vendor/github.com/containers/image/docker/archive/transport.go b/vendor/github.com/containers/image/docker/archive/transport.go
new file mode 100644
index 0000000..f345b34
--- /dev/null
+++ b/vendor/github.com/containers/image/docker/archive/transport.go
@@ -0,0 +1,160 @@
+package archive
+
+import (
+	"context"
+	"fmt"
+	"strings"
+
+	"github.com/containers/image/docker/reference"
+	ctrImage "github.com/containers/image/image"
+	"github.com/containers/image/transports"
+	"github.com/containers/image/types"
+	"github.com/pkg/errors"
+)
+
+func init() {
+	transports.Register(Transport)
+}
+
+// Transport is an ImageTransport for local Docker archives.
+var Transport = archiveTransport{}
+
+type archiveTransport struct{}
+
+func (t archiveTransport) Name() string {
+	return "docker-archive"
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
+func (t archiveTransport) ParseReference(reference string) (types.ImageReference, error) {
+	return ParseReference(reference)
+}
+
+// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys
+// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
+// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion.
+// scope passed to this function will not be "", that value is always allowed.
+func (t archiveTransport) ValidatePolicyConfigurationScope(scope string) error {
+	// See the explanation in archiveReference.PolicyConfigurationIdentity.
+	return errors.New(`docker-archive: does not support any scopes except the default "" one`)
+}
+
+// archiveReference is an ImageReference for Docker images.
+type archiveReference struct {
+	// only used for destinations
+	// archiveReference.destinationRef is optional and can be nil for destinations as well.
+	destinationRef reference.NamedTagged
+	path           string
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into a Docker ImageReference.
+func ParseReference(refString string) (types.ImageReference, error) {
+	if refString == "" {
+		return nil, errors.Errorf("docker-archive reference %s isn't of the form <path>[:<reference>]", refString)
+	}
+
+	parts := strings.SplitN(refString, ":", 2)
+	path := parts[0]
+	var destinationRef reference.NamedTagged
+
+	// A :tag was specified, which is only necessary for destinations.
+	if len(parts) == 2 {
+		ref, err := reference.ParseNormalizedNamed(parts[1])
+		if err != nil {
+			return nil, errors.Wrapf(err, "docker-archive parsing reference")
+		}
+		ref = reference.TagNameOnly(ref)
+
+		if _, isDigest := ref.(reference.Canonical); isDigest {
+			return nil, errors.Errorf("docker-archive doesn't support digest references: %s", refString)
+		}
+
+		refTagged, isTagged := ref.(reference.NamedTagged)
+		if !isTagged {
+			// Really shouldn't be hit...
+ return nil, errors.Errorf("internal error: reference is not tagged even after reference.TagNameOnly: %s", refString) + } + destinationRef = refTagged + } + + return archiveReference{ + destinationRef: destinationRef, + path: path, + }, nil +} + +func (ref archiveReference) Transport() types.ImageTransport { + return Transport +} + +// StringWithinTransport returns a string representation of the reference, which MUST be such that +// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. +// NOTE: The returned string is not promised to be equal to the original input to ParseReference; +// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. +// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix. +func (ref archiveReference) StringWithinTransport() string { + if ref.destinationRef == nil { + return ref.path + } + return fmt.Sprintf("%s:%s", ref.path, ref.destinationRef.String()) +} + +// DockerReference returns a Docker reference associated with this reference +// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, +// not e.g. after redirect or alias processing), or nil if unknown/not applicable. +func (ref archiveReference) DockerReference() reference.Named { + return ref.destinationRef +} + +// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. +// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases; +// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical +// (i.e. various references with exactly the same semantics should return the same configuration identity) +// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but +// not required/guaranteed that it will be a valid input to Transport().ParseReference(). +// Returns "" if configuration identities for these references are not supported. +func (ref archiveReference) PolicyConfigurationIdentity() string { + // Punt, the justification is similar to dockerReference.PolicyConfigurationIdentity. + return "" +} + +// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search +// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed +// in order, terminating on first match, and an implicit "" is always checked at the end. +// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), +// and each following element to be a prefix of the element preceding it. +func (ref archiveReference) PolicyConfigurationNamespaces() []string { + // TODO + return []string{} +} + +// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. +// The caller must call .Close() on the returned ImageCloser. +// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, +// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. +// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. 
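+//
+// An illustrative end-to-end read (the path is hypothetical; the tag part of the reference is only needed for writes):
+//
+//	ref, err := ParseReference("/tmp/busybox.tar")
+//	img, err := ref.NewImage(ctx, nil) // nil *types.SystemContext uses defaults
+//	defer img.Close()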
+func (ref archiveReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
+	src, err := newImageSource(ctx, ref)
+	if err != nil {
+		return nil, err
+	}
+	return ctrImage.FromSource(ctx, sys, src)
+}
+
+// NewImageSource returns a types.ImageSource for this reference.
+// The caller must call .Close() on the returned ImageSource.
+func (ref archiveReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
+	return newImageSource(ctx, ref)
+}
+
+// NewImageDestination returns a types.ImageDestination for this reference.
+// The caller must call .Close() on the returned ImageDestination.
+func (ref archiveReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
+	return newImageDestination(sys, ref)
+}
+
+// DeleteImage deletes the named image from the registry, if supported.
+func (ref archiveReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
+	// Not really supported, for safety reasons.
+	return errors.New("Deleting images not implemented for docker-archive: images")
+}
diff --git a/vendor/github.com/containers/image/docker/cache.go b/vendor/github.com/containers/image/docker/cache.go
new file mode 100644
index 0000000..64ad57a
--- /dev/null
+++ b/vendor/github.com/containers/image/docker/cache.go
@@ -0,0 +1,23 @@
+package docker
+
+import (
+	"github.com/containers/image/docker/reference"
+	"github.com/containers/image/types"
+)
+
+// bicTransportScope returns a BICTransportScope appropriate for ref.
+func bicTransportScope(ref dockerReference) types.BICTransportScope {
+	// Blobs can be reused across the whole registry.
+	return types.BICTransportScope{Opaque: reference.Domain(ref.ref)}
+}
+
+// newBICLocationReference returns a BICLocationReference appropriate for ref.
+func newBICLocationReference(ref dockerReference) types.BICLocationReference {
+	// Blobs are scoped to repositories (the tag/digest are not necessary to reuse a blob).
+	return types.BICLocationReference{Opaque: ref.ref.Name()}
+}
+
+// parseBICLocationReference returns a repository for encoded lr.
+func parseBICLocationReference(lr types.BICLocationReference) (reference.Named, error) {
+	return reference.ParseNormalizedNamed(lr.Opaque)
+}
diff --git a/vendor/github.com/containers/image/docker/daemon/client.go b/vendor/github.com/containers/image/docker/daemon/client.go
new file mode 100644
index 0000000..26f1b03
--- /dev/null
+++ b/vendor/github.com/containers/image/docker/daemon/client.go
@@ -0,0 +1,85 @@
+package daemon
+
+import (
+	"net/http"
+	"path/filepath"
+
+	"github.com/containers/image/types"
+	dockerclient "github.com/docker/docker/client"
+	"github.com/docker/go-connections/tlsconfig"
+)
+
+const (
+	// The default API version to be used in case none is explicitly specified
+	defaultAPIVersion = "1.22"
+)
+
+// newDockerClient initializes a new API client based on the passed SystemContext.
+func newDockerClient(sys *types.SystemContext) (*dockerclient.Client, error) {
+	host := dockerclient.DefaultDockerHost
+	if sys != nil && sys.DockerDaemonHost != "" {
+		host = sys.DockerDaemonHost
+	}
+
+	// Sadly, unix:// sockets don't work transparently with dockerclient.NewClient.
+ // They work fine with a nil httpClient; with a non-nil httpClient, the transport’s + // TLSClientConfig must be nil (or the client will try using HTTPS over the PF_UNIX socket + // regardless of the values in the *tls.Config), and we would have to call sockets.ConfigureTransport. + // + // We don't really want to configure anything for unix:// sockets, so just pass a nil *http.Client. + // + // Similarly, if we want to communicate over plain HTTP on a TCP socket, we also need to set + // TLSClientConfig to nil. This can be achieved by using the form `http://` + url, err := dockerclient.ParseHostURL(host) + if err != nil { + return nil, err + } + var httpClient *http.Client + if url.Scheme != "unix" { + if url.Scheme == "http" { + httpClient = httpConfig() + } else { + hc, err := tlsConfig(sys) + if err != nil { + return nil, err + } + httpClient = hc + } + } + + return dockerclient.NewClient(host, defaultAPIVersion, httpClient, nil) +} + +func tlsConfig(sys *types.SystemContext) (*http.Client, error) { + options := tlsconfig.Options{} + if sys != nil && sys.DockerDaemonInsecureSkipTLSVerify { + options.InsecureSkipVerify = true + } + + if sys != nil && sys.DockerDaemonCertPath != "" { + options.CAFile = filepath.Join(sys.DockerDaemonCertPath, "ca.pem") + options.CertFile = filepath.Join(sys.DockerDaemonCertPath, "cert.pem") + options.KeyFile = filepath.Join(sys.DockerDaemonCertPath, "key.pem") + } + + tlsc, err := tlsconfig.Client(options) + if err != nil { + return nil, err + } + + return &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: tlsc, + }, + CheckRedirect: dockerclient.CheckRedirect, + }, nil +} + +func httpConfig() *http.Client { + return &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: nil, + }, + CheckRedirect: dockerclient.CheckRedirect, + } +} diff --git a/vendor/github.com/containers/image/docker/daemon/daemon_dest.go b/vendor/github.com/containers/image/docker/daemon/daemon_dest.go new file mode 100644 index 0000000..663086f --- /dev/null +++ b/vendor/github.com/containers/image/docker/daemon/daemon_dest.go @@ -0,0 +1,144 @@ +package daemon + +import ( + "context" + "io" + + "github.com/containers/image/docker/reference" + "github.com/containers/image/docker/tarfile" + "github.com/containers/image/types" + "github.com/docker/docker/client" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +type daemonImageDestination struct { + ref daemonReference + mustMatchRuntimeOS bool + *tarfile.Destination // Implements most of types.ImageDestination + // For talking to imageLoadGoroutine + goroutineCancel context.CancelFunc + statusChannel <-chan error + writer *io.PipeWriter + // Other state + committed bool // writer has been closed +} + +// newImageDestination returns a types.ImageDestination for the specified image reference. 
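+//
+// Internally the tar stream produced by tarfile.Destination is fed through an io.Pipe to
+// client.ImageLoad in a goroutine; the wiring set up below is roughly:
+//
+//	reader, writer := io.Pipe()
+//	go imageLoadGoroutine(goroutineContext, c, reader, statusChannel) // consumes the tar
+//	dest := tarfile.NewDestination(writer, namedTaggedRef)            // produces the tar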
+func newImageDestination(ctx context.Context, sys *types.SystemContext, ref daemonReference) (types.ImageDestination, error) { + if ref.ref == nil { + return nil, errors.Errorf("Invalid destination docker-daemon:%s: a destination must be a name:tag", ref.StringWithinTransport()) + } + namedTaggedRef, ok := ref.ref.(reference.NamedTagged) + if !ok { + return nil, errors.Errorf("Invalid destination docker-daemon:%s: a destination must be a name:tag", ref.StringWithinTransport()) + } + + var mustMatchRuntimeOS = true + if sys != nil && sys.DockerDaemonHost != client.DefaultDockerHost { + mustMatchRuntimeOS = false + } + + c, err := newDockerClient(sys) + if err != nil { + return nil, errors.Wrap(err, "Error initializing docker engine client") + } + + reader, writer := io.Pipe() + // Commit() may never be called, so we may never read from this channel; so, make this buffered to allow imageLoadGoroutine to write status and terminate even if we never read it. + statusChannel := make(chan error, 1) + + goroutineContext, goroutineCancel := context.WithCancel(ctx) + go imageLoadGoroutine(goroutineContext, c, reader, statusChannel) + + return &daemonImageDestination{ + ref: ref, + mustMatchRuntimeOS: mustMatchRuntimeOS, + Destination: tarfile.NewDestination(writer, namedTaggedRef), + goroutineCancel: goroutineCancel, + statusChannel: statusChannel, + writer: writer, + committed: false, + }, nil +} + +// imageLoadGoroutine accepts tar stream on reader, sends it to c, and reports error or success by writing to statusChannel +func imageLoadGoroutine(ctx context.Context, c *client.Client, reader *io.PipeReader, statusChannel chan<- error) { + err := errors.New("Internal error: unexpected panic in imageLoadGoroutine") + defer func() { + logrus.Debugf("docker-daemon: sending done, status %v", err) + statusChannel <- err + }() + defer func() { + if err == nil { + reader.Close() + } else { + reader.CloseWithError(err) + } + }() + + resp, err := c.ImageLoad(ctx, reader, true) + if err != nil { + err = errors.Wrap(err, "Error saving image to docker engine") + return + } + defer resp.Body.Close() +} + +// DesiredLayerCompression indicates if layers must be compressed, decompressed or preserved +func (d *daemonImageDestination) DesiredLayerCompression() types.LayerCompression { + return types.PreserveOriginal +} + +// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise. +func (d *daemonImageDestination) MustMatchRuntimeOS() bool { + return d.mustMatchRuntimeOS +} + +// Close removes resources associated with an initialized ImageDestination, if any. +func (d *daemonImageDestination) Close() error { + if !d.committed { + logrus.Debugf("docker-daemon: Closing tar stream to abort loading") + // In principle, goroutineCancel() should abort the HTTP request and stop the process from continuing. + // In practice, though, various HTTP implementations used by client.Client.ImageLoad() (including + // https://github.com/golang/net/blob/master/context/ctxhttp/ctxhttp_pre17.go and the + // net/http version with native Context support in Go 1.7) do not always actually immediately cancel + // the operation: they may process the HTTP request, or a part of it, to completion in a goroutine, and + // return early if the context is canceled without terminating the goroutine at all. 
+ // So we need this CloseWithError to terminate sending the HTTP request Body
+ // immediately, and hopefully, through terminating the sending which uses "Transfer-Encoding: chunked" without sending
+ // the terminating zero-length chunk, prevent the docker daemon from processing the tar stream at all.
+ // Whether that works or not, closing the PipeWriter seems desirable in any case.
+ d.writer.CloseWithError(errors.New("Aborting upload, daemonImageDestination closed without a previous .Commit()"))
+ }
+ d.goroutineCancel()
+
+ return nil
+}
+
+func (d *daemonImageDestination) Reference() types.ImageReference {
+ return d.ref
+}
+
+// Commit marks the process of storing the image as successful and asks for the image to be persisted.
+// WARNING: This does not have any transactional semantics:
+// - Uploaded data MAY be visible to others before Commit() is called
+// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
+func (d *daemonImageDestination) Commit(ctx context.Context) error {
+ logrus.Debugf("docker-daemon: Closing tar stream")
+ if err := d.Destination.Commit(ctx); err != nil {
+ return err
+ }
+ if err := d.writer.Close(); err != nil {
+ return err
+ }
+ d.committed = true // We may still fail, but we are done sending to imageLoadGoroutine.
+
+ logrus.Debugf("docker-daemon: Waiting for status")
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ case err := <-d.statusChannel:
+ return err
+ }
+}
diff --git a/vendor/github.com/containers/image/docker/daemon/daemon_src.go b/vendor/github.com/containers/image/docker/daemon/daemon_src.go
new file mode 100644
index 0000000..89e66ef
--- /dev/null
+++ b/vendor/github.com/containers/image/docker/daemon/daemon_src.go
@@ -0,0 +1,62 @@
+package daemon
+
+import (
+ "context"
+
+ "github.com/containers/image/docker/tarfile"
+ "github.com/containers/image/types"
+ "github.com/pkg/errors"
+)
+
+type daemonImageSource struct {
+ ref daemonReference
+ *tarfile.Source // Implements most of types.ImageSource
+}
+
+type layerInfo struct {
+ path string
+ size int64
+}
+
+// newImageSource returns a types.ImageSource for the specified image reference.
+// The caller must call .Close() on the returned ImageSource.
+//
+// It would be great if we were able to stream the input tar as it is being
+// sent; but Docker sends the top-level manifest, which determines which paths
+// to look for, at the end, so we will need to seek back and re-read, several times.
+// (We could, perhaps, expect an exact sequence, assume that the first plaintext file
+// is the config, and that the following len(RootFS) files are the layers, but that feels
+// way too brittle.)
+func newImageSource(ctx context.Context, sys *types.SystemContext, ref daemonReference) (types.ImageSource, error) {
+ c, err := newDockerClient(sys)
+ if err != nil {
+ return nil, errors.Wrap(err, "Error initializing docker engine client")
+ }
+ // Per NewReference(), ref.StringWithinTransport() is either an image ID (config digest), or a !reference.NameOnly() reference.
+ // Either way ImageSave should create a tarball with exactly one image.
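+ // For illustration, both of these example values are acceptable here:
+ //   "busybox:latest"         (a name:tag reference)
+ //   "sha256:<64 hex digits>" (an image ID, i.e. a config digest)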
+ inputStream, err := c.ImageSave(ctx, []string{ref.StringWithinTransport()}) + if err != nil { + return nil, errors.Wrap(err, "Error loading image from docker engine") + } + defer inputStream.Close() + + src, err := tarfile.NewSourceFromStream(inputStream) + if err != nil { + return nil, err + } + return &daemonImageSource{ + ref: ref, + Source: src, + }, nil +} + +// Reference returns the reference used to set up this source, _as specified by the user_ +// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. +func (s *daemonImageSource) Reference() types.ImageReference { + return s.ref +} + +// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified. +func (s *daemonImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) { + return nil, nil +} diff --git a/vendor/github.com/containers/image/docker/daemon/daemon_transport.go b/vendor/github.com/containers/image/docker/daemon/daemon_transport.go new file mode 100644 index 0000000..1a265bf --- /dev/null +++ b/vendor/github.com/containers/image/docker/daemon/daemon_transport.go @@ -0,0 +1,223 @@ +package daemon + +import ( + "context" + "fmt" + + "github.com/containers/image/docker/policyconfiguration" + "github.com/containers/image/docker/reference" + "github.com/containers/image/image" + "github.com/containers/image/transports" + "github.com/containers/image/types" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +func init() { + transports.Register(Transport) +} + +// Transport is an ImageTransport for images managed by a local Docker daemon. +var Transport = daemonTransport{} + +type daemonTransport struct{} + +// Name returns the name of the transport, which must be unique among other transports. +func (t daemonTransport) Name() string { + return "docker-daemon" +} + +// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. +func (t daemonTransport) ParseReference(reference string) (types.ImageReference, error) { + return ParseReference(reference) +} + +// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys +// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). +// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion. +// scope passed to this function will not be "", that value is always allowed. +func (t daemonTransport) ValidatePolicyConfigurationScope(scope string) error { + // ID values cannot be effectively namespaced, and are clearly invalid host:port values. + if _, err := digest.Parse(scope); err == nil { + return errors.Errorf(`docker-daemon: can not use algo:digest value %s as a namespace`, scope) + } + + // FIXME? We could be verifying the various character set and length restrictions + // from docker/distribution/reference.regexp.go, but other than that there + // are few semantically invalid strings. + return nil +} + +// daemonReference is an ImageReference for images managed by a local Docker daemon +// Exactly one of id and ref can be set. +// For daemonImageSource, both id and ref are acceptable, ref must not be a NameOnly (interpreted as all tags in that repository by the daemon) +// For daemonImageDestination, it must be a ref, which is NamedTagged. 
+// (We could, in principle, also allow storing images without tagging them, and the user would have to refer to them using the docker image ID = config digest. +// Using the config digest requires the caller to parse the manifest themselves, which is very cumbersome; so, for now, we don’t bother.) +type daemonReference struct { + id digest.Digest + ref reference.Named // !reference.IsNameOnly +} + +// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. +func ParseReference(refString string) (types.ImageReference, error) { + // This is intended to be compatible with reference.ParseAnyReference, but more strict about refusing some of the ambiguous cases. + // In particular, this rejects unprefixed digest values (64 hex chars), and sha256 digest prefixes (sha256:fewer-than-64-hex-chars). + + // digest:hexstring is structurally the same as a reponame:tag (meaning docker.io/library/reponame:tag). + // reference.ParseAnyReference interprets such strings as digests. + if dgst, err := digest.Parse(refString); err == nil { + // The daemon explicitly refuses to tag images with a reponame equal to digest.Canonical - but _only_ this digest name. + // Other digest references are ambiguous, so refuse them. + if dgst.Algorithm() != digest.Canonical { + return nil, errors.Errorf("Invalid docker-daemon: reference %s: only digest algorithm %s accepted", refString, digest.Canonical) + } + return NewReference(dgst, nil) + } + + ref, err := reference.ParseNormalizedNamed(refString) // This also rejects unprefixed digest values + if err != nil { + return nil, err + } + if reference.FamiliarName(ref) == digest.Canonical.String() { + return nil, errors.Errorf("Invalid docker-daemon: reference %s: The %s repository name is reserved for (non-shortened) digest references", refString, digest.Canonical) + } + return NewReference("", ref) +} + +// NewReference returns a docker-daemon reference for either the supplied image ID (config digest) or the supplied reference (which must satisfy !reference.IsNameOnly) +func NewReference(id digest.Digest, ref reference.Named) (types.ImageReference, error) { + if id != "" && ref != nil { + return nil, errors.New("docker-daemon: reference must not have an image ID and a reference string specified at the same time") + } + if ref != nil { + if reference.IsNameOnly(ref) { + return nil, errors.Errorf("docker-daemon: reference %s has neither a tag nor a digest", reference.FamiliarString(ref)) + } + // A github.com/distribution/reference value can have a tag and a digest at the same time! + // Most versions of docker/reference do not handle that (ignoring the tag), so reject such input. + // This MAY be accepted in the future. + // (Even if it were supported, the semantics of policy namespaces are unclear - should we drop + // the tag or the digest first?) + _, isTagged := ref.(reference.NamedTagged) + _, isDigested := ref.(reference.Canonical) + if isTagged && isDigested { + return nil, errors.Errorf("docker-daemon: references with both a tag and digest are currently not supported") + } + } + return daemonReference{ + id: id, + ref: ref, + }, nil +} + +func (ref daemonReference) Transport() types.ImageTransport { + return Transport +} + +// StringWithinTransport returns a string representation of the reference, which MUST be such that +// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. 
+// NOTE: The returned string is not promised to be equal to the original input to ParseReference; +// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. +// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix; +// instead, see transports.ImageName(). +func (ref daemonReference) StringWithinTransport() string { + switch { + case ref.id != "": + return ref.id.String() + case ref.ref != nil: + return reference.FamiliarString(ref.ref) + default: // Coverage: Should never happen, NewReference above should refuse such values. + panic("Internal inconsistency: daemonReference has empty id and nil ref") + } +} + +// DockerReference returns a Docker reference associated with this reference +// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, +// not e.g. after redirect or alias processing), or nil if unknown/not applicable. +func (ref daemonReference) DockerReference() reference.Named { + return ref.ref // May be nil +} + +// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. +// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases; +// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical +// (i.e. various references with exactly the same semantics should return the same configuration identity) +// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but +// not required/guaranteed that it will be a valid input to Transport().ParseReference(). +// Returns "" if configuration identities for these references are not supported. +func (ref daemonReference) PolicyConfigurationIdentity() string { + // We must allow referring to images in the daemon by image ID, otherwise untagged images would not be accessible. + // But the existence of image IDs means that we can’t truly well namespace the input: + // a single image can be namespaced either using the name or the ID depending on how it is named. + // + // That’s fairly unexpected, but we have to cope somehow. + // + // So, use the ordinary docker/policyconfiguration namespacing for named images. + // image IDs all fall into the root namespace. + // Users can set up the root namespace to be either untrusted or rejected, + // and to set up specific trust for named namespaces. This allows verifying image + // identity when a name is known, and unnamed images would be untrusted or rejected. + switch { + case ref.id != "": + return "" // This still allows using the default "" scope to define a global policy for ID-identified images. + case ref.ref != nil: + res, err := policyconfiguration.DockerReferenceIdentity(ref.ref) + if res == "" || err != nil { // Coverage: Should never happen, NewReference above should refuse values which could cause a failure. + panic(fmt.Sprintf("Internal inconsistency: policyconfiguration.DockerReferenceIdentity returned %#v, %v", res, err)) + } + return res + default: // Coverage: Should never happen, NewReference above should refuse such values. + panic("Internal inconsistency: daemonReference has empty id and nil ref") + } +} + +// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search +// for if explicit configuration for PolicyConfigurationIdentity() is not set. 
The list will be processed +// in order, terminating on first match, and an implicit "" is always checked at the end. +// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), +// and each following element to be a prefix of the element preceding it. +func (ref daemonReference) PolicyConfigurationNamespaces() []string { + // See the explanation in daemonReference.PolicyConfigurationIdentity. + switch { + case ref.id != "": + return []string{} + case ref.ref != nil: + return policyconfiguration.DockerReferenceNamespaces(ref.ref) + default: // Coverage: Should never happen, NewReference above should refuse such values. + panic("Internal inconsistency: daemonReference has empty id and nil ref") + } +} + +// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. +// The caller must call .Close() on the returned ImageCloser. +// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, +// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. +// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. +func (ref daemonReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { + src, err := newImageSource(ctx, sys, ref) + if err != nil { + return nil, err + } + return image.FromSource(ctx, sys, src) +} + +// NewImageSource returns a types.ImageSource for this reference. +// The caller must call .Close() on the returned ImageSource. +func (ref daemonReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { + return newImageSource(ctx, sys, ref) +} + +// NewImageDestination returns a types.ImageDestination for this reference. +// The caller must call .Close() on the returned ImageDestination. +func (ref daemonReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { + return newImageDestination(ctx, sys, ref) +} + +// DeleteImage deletes the named image from the registry, if supported. +func (ref daemonReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { + // Should this just untag the image? Should this stop running containers? + // The semantics is not quite as clear as for remote repositories. + // The user can run (docker rmi) directly anyway, so, for now(?), punt instead of trying to guess what the user meant. 
+ return errors.Errorf("Deleting images not implemented for docker-daemon: images") +} diff --git a/vendor/github.com/containers/image/docker/docker_client.go b/vendor/github.com/containers/image/docker/docker_client.go new file mode 100644 index 0000000..81a43e0 --- /dev/null +++ b/vendor/github.com/containers/image/docker/docker_client.go @@ -0,0 +1,644 @@ +package docker + +import ( + "context" + "crypto/tls" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "path/filepath" + "strconv" + "strings" + "sync" + "time" + + "github.com/containers/image/docker/reference" + "github.com/containers/image/pkg/docker/config" + "github.com/containers/image/pkg/sysregistriesv2" + "github.com/containers/image/pkg/tlsclientconfig" + "github.com/containers/image/types" + "github.com/docker/distribution/registry/client" + "github.com/docker/go-connections/tlsconfig" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +const ( + dockerHostname = "docker.io" + dockerV1Hostname = "index.docker.io" + dockerRegistry = "registry-1.docker.io" + + resolvedPingV2URL = "%s://%s/v2/" + resolvedPingV1URL = "%s://%s/v1/_ping" + tagsPath = "/v2/%s/tags/list" + manifestPath = "/v2/%s/manifests/%s" + blobsPath = "/v2/%s/blobs/%s" + blobUploadPath = "/v2/%s/blobs/uploads/" + extensionsSignaturePath = "/extensions/v2/%s/signatures/%s" + + minimumTokenLifetimeSeconds = 60 + + extensionSignatureSchemaVersion = 2 // extensionSignature.Version + extensionSignatureTypeAtomic = "atomic" // extensionSignature.Type +) + +var ( + // ErrV1NotSupported is returned when we're trying to talk to a + // docker V1 registry. + ErrV1NotSupported = errors.New("can't talk to a V1 docker registry") + // ErrUnauthorizedForCredentials is returned when the status code returned is 401 + ErrUnauthorizedForCredentials = errors.New("unable to retrieve auth token: invalid username/password") + systemPerHostCertDirPaths = [2]string{"/etc/containers/certs.d", "/etc/docker/certs.d"} +) + +// extensionSignature and extensionSignatureList come from github.com/openshift/origin/pkg/dockerregistry/server/signaturedispatcher.go: +// signature represents a Docker image signature. +type extensionSignature struct { + Version int `json:"schemaVersion"` // Version specifies the schema version + Name string `json:"name"` // Name must be in "sha256:@signatureName" format + Type string `json:"type"` // Type is optional, of not set it will be defaulted to "AtomicImageV1" + Content []byte `json:"content"` // Content contains the signature +} + +// signatureList represents list of Docker image signatures. +type extensionSignatureList struct { + Signatures []extensionSignature `json:"signatures"` +} + +type bearerToken struct { + Token string `json:"token"` + AccessToken string `json:"access_token"` + ExpiresIn int `json:"expires_in"` + IssuedAt time.Time `json:"issued_at"` + expirationTime time.Time +} + +// dockerClient is configuration for dealing with a single Docker registry. +type dockerClient struct { + // The following members are set by newDockerClient and do not change afterwards. + sys *types.SystemContext + registry string + + // tlsClientConfig is setup by newDockerClient and will be used and updated + // by detectProperties(). Callers can edit tlsClientConfig.InsecureSkipVerify in the meantime. + tlsClientConfig *tls.Config + // The following members are not set by newDockerClient and must be set by callers if needed. 
+ username string + password string + signatureBase signatureStorageBase + scope authScope + + // The following members are detected registry properties: + // They are set after a successful detectProperties(), and never change afterwards. + client *http.Client + scheme string + challenges []challenge + supportsSignatures bool + + // Private state for setupRequestAuth (key: string, value: bearerToken) + tokenCache sync.Map + // Private state for detectProperties: + detectPropertiesOnce sync.Once // detectPropertiesOnce is used to execute detectProperties() at most once. + detectPropertiesError error // detectPropertiesError caches the initial error. +} + +type authScope struct { + remoteName string + actions string +} + +// sendAuth determines whether we need authentication for v2 or v1 endpoint. +type sendAuth int + +const ( + // v2 endpoint with authentication. + v2Auth sendAuth = iota + // v1 endpoint with authentication. + // TODO: Get v1Auth working + // v1Auth + // no authentication, works for both v1 and v2. + noAuth +) + +func newBearerTokenFromJSONBlob(blob []byte) (*bearerToken, error) { + token := new(bearerToken) + if err := json.Unmarshal(blob, &token); err != nil { + return nil, err + } + if token.Token == "" { + token.Token = token.AccessToken + } + if token.ExpiresIn < minimumTokenLifetimeSeconds { + token.ExpiresIn = minimumTokenLifetimeSeconds + logrus.Debugf("Increasing token expiration to: %d seconds", token.ExpiresIn) + } + if token.IssuedAt.IsZero() { + token.IssuedAt = time.Now().UTC() + } + token.expirationTime = token.IssuedAt.Add(time.Duration(token.ExpiresIn) * time.Second) + return token, nil +} + +// this is cloned from docker/go-connections because upstream docker has changed +// it and make deps here fails otherwise. +// We'll drop this once we upgrade to docker 1.13.x deps. +func serverDefault() *tls.Config { + return &tls.Config{ + // Avoid fallback to SSL protocols < TLS1.0 + MinVersion: tls.VersionTLS10, + PreferServerCipherSuites: true, + CipherSuites: tlsconfig.DefaultServerAcceptedCiphers, + } +} + +// dockerCertDir returns a path to a directory to be consumed by tlsclientconfig.SetupCertificates() depending on ctx and hostPort. 
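+// For example (illustrative host name), for "myregistry.example.com:5000" this
+// typically resolves to a per-host directory such as
+//	/etc/docker/certs.d/myregistry.example.com:5000/
+// which may contain files like ca.crt, client.cert and client.key; the actual
+// file handling is done by tlsclientconfig.SetupCertificates, not here.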
+func dockerCertDir(sys *types.SystemContext, hostPort string) (string, error) {
+ if sys != nil && sys.DockerCertPath != "" {
+ return sys.DockerCertPath, nil
+ }
+ if sys != nil && sys.DockerPerHostCertDirPath != "" {
+ return filepath.Join(sys.DockerPerHostCertDirPath, hostPort), nil
+ }
+
+ var (
+ hostCertDir string
+ fullCertDirPath string
+ )
+ for _, systemPerHostCertDirPath := range systemPerHostCertDirPaths {
+ if sys != nil && sys.RootForImplicitAbsolutePaths != "" {
+ hostCertDir = filepath.Join(sys.RootForImplicitAbsolutePaths, systemPerHostCertDirPath)
+ } else {
+ hostCertDir = systemPerHostCertDirPath
+ }
+
+ fullCertDirPath = filepath.Join(hostCertDir, hostPort)
+ _, err := os.Stat(fullCertDirPath)
+ if err == nil {
+ break
+ }
+ if os.IsNotExist(err) {
+ continue
+ }
+ if os.IsPermission(err) {
+ logrus.Debugf("error accessing certs directory due to permissions: %v", err)
+ continue
+ }
+ if err != nil {
+ return "", err
+ }
+ }
+ return fullCertDirPath, nil
+}
+
+// newDockerClientFromRef returns a new dockerClient instance for refHostname (a host as specified in the Docker image reference, not canonicalized to dockerRegistry)
+// “write” specifies whether the client will be used for "write" access (in particular passed to lookaside.go:toplevelFromSection)
+func newDockerClientFromRef(sys *types.SystemContext, ref dockerReference, write bool, actions string) (*dockerClient, error) {
+ registry := reference.Domain(ref.ref)
+ username, password, err := config.GetAuthentication(sys, registry)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error getting username and password")
+ }
+ sigBase, err := configuredSignatureStorageBase(sys, ref, write)
+ if err != nil {
+ return nil, err
+ }
+
+ client, err := newDockerClient(sys, registry, ref.ref.Name())
+ if err != nil {
+ return nil, err
+ }
+ client.username = username
+ client.password = password
+ client.signatureBase = sigBase
+ client.scope.actions = actions
+ client.scope.remoteName = reference.Path(ref.ref)
+ return client, nil
+}
+
+// newDockerClient returns a new dockerClient instance for the given registry
+// and reference. The reference is used to query the registry configuration
+// and can be either a registry (e.g., "registry.com[:5000]") or a repository
+// (e.g., "registry.com[:5000][/some/namespace]/repo").
+// Please note that newDockerClient does not set all members of dockerClient
+// (e.g., username and password); those must be set by callers if necessary.
+func newDockerClient(sys *types.SystemContext, registry, reference string) (*dockerClient, error) {
+ hostName := registry
+ if registry == dockerHostname {
+ registry = dockerRegistry
+ }
+ tlsClientConfig := serverDefault()
+
+ // It is undefined whether the host[:port] string for dockerHostname should be dockerHostname or dockerRegistry,
+ // because docker/docker does not read the certs.d subdirectory at all in that case. We use the user-visible
+ // dockerHostname here, because it is more symmetrical to read the configuration in that case as well, and because
+ // generally the UI hides the existence of the different dockerRegistry. But note that this behavior is
+ // undocumented and may change if docker/docker changes.
+ certDir, err := dockerCertDir(sys, hostName)
+ if err != nil {
+ return nil, err
+ }
+ if err := tlsclientconfig.SetupCertificates(certDir, tlsClientConfig); err != nil {
+ return nil, err
+ }
+
+ // Check if TLS verification shall be skipped (default=false) which can
+ // be specified in the sysregistriesv2 configuration.
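+ // A registries.conf (v2) entry along these lines would make reg.Insecure
+ // below true (values are illustrative):
+ //
+ //   [[registry]]
+ //   location = "myregistry.example.com:5000"
+ //   insecure = true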
+ skipVerify := false
+ reg, err := sysregistriesv2.FindRegistry(sys, reference)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error loading registries")
+ }
+ if reg != nil {
+ skipVerify = reg.Insecure
+ }
+ tlsClientConfig.InsecureSkipVerify = skipVerify
+
+ return &dockerClient{
+ sys: sys,
+ registry: registry,
+ tlsClientConfig: tlsClientConfig,
+ }, nil
+}
+
+// CheckAuth validates the credentials by attempting to log into the registry.
+// It returns an error if an error occurred while making the http request or the status code received was 401.
+func CheckAuth(ctx context.Context, sys *types.SystemContext, username, password, registry string) error {
+ client, err := newDockerClient(sys, registry, registry)
+ if err != nil {
+ return errors.Wrapf(err, "error creating new docker client")
+ }
+ client.username = username
+ client.password = password
+
+ resp, err := client.makeRequest(ctx, "GET", "/v2/", nil, nil, v2Auth, nil)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+
+ switch resp.StatusCode {
+ case http.StatusOK:
+ return nil
+ case http.StatusUnauthorized:
+ return ErrUnauthorizedForCredentials
+ default:
+ return errors.Errorf("error occurred with status code %d (%s)", resp.StatusCode, http.StatusText(resp.StatusCode))
+ }
+}
+
+// SearchResult holds the information of each matching image
+// It matches the output returned by the v1 endpoint
+type SearchResult struct {
+ Name string `json:"name"`
+ Description string `json:"description"`
+ // StarCount states the number of stars the image has
+ StarCount int `json:"star_count"`
+ IsTrusted bool `json:"is_trusted"`
+ // IsAutomated states whether the image is an automated build
+ IsAutomated bool `json:"is_automated"`
+ // IsOfficial states whether the image is an official build
+ IsOfficial bool `json:"is_official"`
+}
+
+// SearchRegistry queries a registry for images that contain "image" in their name
+// The limit is the max number of results desired
+// Note: The limit value doesn't work with all registries
+// for example registry.access.redhat.com returns all the results without limiting it to the limit value
+func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, image string, limit int) ([]SearchResult, error) {
+ type V2Results struct {
+ // Repositories holds the results returned by the /v2/_catalog endpoint
+ Repositories []string `json:"repositories"`
+ }
+ type V1Results struct {
+ // Results holds the results returned by the /v1/search endpoint
+ Results []SearchResult `json:"results"`
+ }
+ v2Res := &V2Results{}
+ v1Res := &V1Results{}
+
+ // Get credentials from authfile for the underlying hostname
+ username, password, err := config.GetAuthentication(sys, registry)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error getting username and password")
+ }
+
+ // The /v2/_catalog endpoint has been disabled for docker.io therefore
+ // the call made to that endpoint will fail. So using the v1 hostname
+ // for docker.io for simplicity of implementation and the fact that it
+ // returns search results.
+ hostname := registry
+ if registry == dockerHostname {
+ hostname = dockerV1Hostname
+ }
+
+ client, err := newDockerClient(sys, hostname, registry)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error creating new docker client")
+ }
+ client.username = username
+ client.password = password
+
+ // Only try the v1 search endpoint if the search query is not empty. If it is
+ // empty, skip to the v2 endpoint.
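+ // The v1 request built below has the form (illustrative values):
+ //   GET /v1/search?q=busybox&n=25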
+ if image != "" { + // set up the query values for the v1 endpoint + u := url.URL{ + Path: "/v1/search", + } + q := u.Query() + q.Set("q", image) + q.Set("n", strconv.Itoa(limit)) + u.RawQuery = q.Encode() + + logrus.Debugf("trying to talk to v1 search endpoint") + resp, err := client.makeRequest(ctx, "GET", u.String(), nil, nil, noAuth, nil) + if err != nil { + logrus.Debugf("error getting search results from v1 endpoint %q: %v", registry, err) + } else { + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + logrus.Debugf("error getting search results from v1 endpoint %q, status code %d (%s)", registry, resp.StatusCode, http.StatusText(resp.StatusCode)) + } else { + if err := json.NewDecoder(resp.Body).Decode(v1Res); err != nil { + return nil, err + } + return v1Res.Results, nil + } + } + } + + logrus.Debugf("trying to talk to v2 search endpoint") + resp, err := client.makeRequest(ctx, "GET", "/v2/_catalog", nil, nil, v2Auth, nil) + if err != nil { + logrus.Debugf("error getting search results from v2 endpoint %q: %v", registry, err) + } else { + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + logrus.Errorf("error getting search results from v2 endpoint %q, status code %d (%s)", registry, resp.StatusCode, http.StatusText(resp.StatusCode)) + } else { + if err := json.NewDecoder(resp.Body).Decode(v2Res); err != nil { + return nil, err + } + searchRes := []SearchResult{} + for _, repo := range v2Res.Repositories { + if strings.Contains(repo, image) { + res := SearchResult{ + Name: repo, + } + searchRes = append(searchRes, res) + } + } + return searchRes, nil + } + } + + return nil, errors.Wrapf(err, "couldn't search registry %q", registry) +} + +// makeRequest creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client. +// The host name and schema is taken from the client or autodetected, and the path is relative to it, i.e. the path usually starts with /v2/. +func (c *dockerClient) makeRequest(ctx context.Context, method, path string, headers map[string][]string, stream io.Reader, auth sendAuth, extraScope *authScope) (*http.Response, error) { + if err := c.detectProperties(ctx); err != nil { + return nil, err + } + + url := fmt.Sprintf("%s://%s%s", c.scheme, c.registry, path) + return c.makeRequestToResolvedURL(ctx, method, url, headers, stream, -1, auth, extraScope) +} + +// makeRequestToResolvedURL creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client. +// streamLen, if not -1, specifies the length of the data expected on stream. +// makeRequest should generally be preferred. +// TODO(runcom): too many arguments here, use a struct +func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method, url string, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth, extraScope *authScope) (*http.Response, error) { + req, err := http.NewRequest(method, url, stream) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if streamLen != -1 { // Do not blindly overwrite if streamLen == -1, http.NewRequest above can figure out the length of bytes.Reader and similar objects without us having to compute it. 
+ req.ContentLength = streamLen + } + req.Header.Set("Docker-Distribution-API-Version", "registry/2.0") + for n, h := range headers { + for _, hh := range h { + req.Header.Add(n, hh) + } + } + if c.sys != nil && c.sys.DockerRegistryUserAgent != "" { + req.Header.Add("User-Agent", c.sys.DockerRegistryUserAgent) + } + if auth == v2Auth { + if err := c.setupRequestAuth(req, extraScope); err != nil { + return nil, err + } + } + logrus.Debugf("%s %s", method, url) + res, err := c.client.Do(req) + if err != nil { + return nil, err + } + return res, nil +} + +// we're using the challenges from the /v2/ ping response and not the one from the destination +// URL in this request because: +// +// 1) docker does that as well +// 2) gcr.io is sending 401 without a WWW-Authenticate header in the real request +// +// debugging: https://github.com/containers/image/pull/211#issuecomment-273426236 and follows up +func (c *dockerClient) setupRequestAuth(req *http.Request, extraScope *authScope) error { + if len(c.challenges) == 0 { + return nil + } + schemeNames := make([]string, 0, len(c.challenges)) + for _, challenge := range c.challenges { + schemeNames = append(schemeNames, challenge.Scheme) + switch challenge.Scheme { + case "basic": + req.SetBasicAuth(c.username, c.password) + return nil + case "bearer": + cacheKey := "" + scopes := []authScope{c.scope} + if extraScope != nil { + // Using ':' as a separator here is unambiguous because getBearerToken below uses the same separator when formatting a remote request (and because repository names can't contain colons). + cacheKey = fmt.Sprintf("%s:%s", extraScope.remoteName, extraScope.actions) + scopes = append(scopes, *extraScope) + } + var token bearerToken + t, inCache := c.tokenCache.Load(cacheKey) + if inCache { + token = t.(bearerToken) + } + if !inCache || time.Now().After(token.expirationTime) { + t, err := c.getBearerToken(req.Context(), challenge, scopes) + if err != nil { + return err + } + token = *t + c.tokenCache.Store(cacheKey, token) + } + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token.Token)) + return nil + default: + logrus.Debugf("no handler for %s authentication", challenge.Scheme) + } + } + logrus.Infof("None of the challenges sent by server (%s) are supported, trying an unauthenticated request anyway", strings.Join(schemeNames, ", ")) + return nil +} + +func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge, scopes []authScope) (*bearerToken, error) { + realm, ok := challenge.Parameters["realm"] + if !ok { + return nil, errors.Errorf("missing realm in bearer auth challenge") + } + + authReq, err := http.NewRequest("GET", realm, nil) + if err != nil { + return nil, err + } + authReq = authReq.WithContext(ctx) + getParams := authReq.URL.Query() + if c.username != "" { + getParams.Add("account", c.username) + } + if service, ok := challenge.Parameters["service"]; ok && service != "" { + getParams.Add("service", service) + } + for _, scope := range scopes { + if scope.remoteName != "" && scope.actions != "" { + getParams.Add("scope", fmt.Sprintf("repository:%s:%s", scope.remoteName, scope.actions)) + } + } + authReq.URL.RawQuery = getParams.Encode() + if c.username != "" && c.password != "" { + authReq.SetBasicAuth(c.username, c.password) + } + logrus.Debugf("%s %s", authReq.Method, authReq.URL.String()) + tr := tlsclientconfig.NewTransport() + // TODO(runcom): insecure for now to contact the external token service + tr.TLSClientConfig = &tls.Config{InsecureSkipVerify: true} + client := 
&http.Client{Transport: tr} + res, err := client.Do(authReq) + if err != nil { + return nil, err + } + defer res.Body.Close() + switch res.StatusCode { + case http.StatusUnauthorized: + return nil, ErrUnauthorizedForCredentials + case http.StatusOK: + break + default: + return nil, errors.Errorf("unexpected http code: %d (%s), URL: %s", res.StatusCode, http.StatusText(res.StatusCode), authReq.URL) + } + tokenBlob, err := ioutil.ReadAll(res.Body) + if err != nil { + return nil, err + } + + return newBearerTokenFromJSONBlob(tokenBlob) +} + +// detectPropertiesHelper performs the work of detectProperties which executes +// it at most once. +func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error { + // We overwrite the TLS clients `InsecureSkipVerify` only if explicitly + // specified by the system context + if c.sys != nil && c.sys.DockerInsecureSkipTLSVerify != types.OptionalBoolUndefined { + c.tlsClientConfig.InsecureSkipVerify = c.sys.DockerInsecureSkipTLSVerify == types.OptionalBoolTrue + } + tr := tlsclientconfig.NewTransport() + tr.TLSClientConfig = c.tlsClientConfig + c.client = &http.Client{Transport: tr} + + ping := func(scheme string) error { + url := fmt.Sprintf(resolvedPingV2URL, scheme, c.registry) + resp, err := c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth, nil) + if err != nil { + logrus.Debugf("Ping %s err %s (%#v)", url, err.Error(), err) + return err + } + defer resp.Body.Close() + logrus.Debugf("Ping %s status %d", url, resp.StatusCode) + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized { + return errors.Errorf("error pinging registry %s, response code %d (%s)", c.registry, resp.StatusCode, http.StatusText(resp.StatusCode)) + } + c.challenges = parseAuthHeader(resp.Header) + c.scheme = scheme + c.supportsSignatures = resp.Header.Get("X-Registry-Supports-Signatures") == "1" + return nil + } + err := ping("https") + if err != nil && c.tlsClientConfig.InsecureSkipVerify { + err = ping("http") + } + if err != nil { + err = errors.Wrap(err, "pinging docker registry returned") + if c.sys != nil && c.sys.DockerDisableV1Ping { + return err + } + // best effort to understand if we're talking to a V1 registry + pingV1 := func(scheme string) bool { + url := fmt.Sprintf(resolvedPingV1URL, scheme, c.registry) + resp, err := c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth, nil) + if err != nil { + logrus.Debugf("Ping %s err %s (%#v)", url, err.Error(), err) + return false + } + defer resp.Body.Close() + logrus.Debugf("Ping %s status %d", url, resp.StatusCode) + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized { + return false + } + return true + } + isV1 := pingV1("https") + if !isV1 && c.tlsClientConfig.InsecureSkipVerify { + isV1 = pingV1("http") + } + if isV1 { + err = ErrV1NotSupported + } + } + return err +} + +// detectProperties detects various properties of the registry. +// See the dockerClient documentation for members which are affected by this. +func (c *dockerClient) detectProperties(ctx context.Context) error { + c.detectPropertiesOnce.Do(func() { c.detectPropertiesError = c.detectPropertiesHelper(ctx) }) + return c.detectPropertiesError +} + +// getExtensionsSignatures returns signatures from the X-Registry-Supports-Signatures API extension, +// using the original data structures. 
+func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerReference, manifestDigest digest.Digest) (*extensionSignatureList, error) {
+ path := fmt.Sprintf(extensionsSignaturePath, reference.Path(ref.ref), manifestDigest)
+ res, err := c.makeRequest(ctx, "GET", path, nil, nil, v2Auth, nil)
+ if err != nil {
+ return nil, err
+ }
+ defer res.Body.Close()
+ if res.StatusCode != http.StatusOK {
+ return nil, errors.Wrapf(client.HandleErrorResponse(res), "Error downloading signatures for %s in %s", manifestDigest, ref.ref.Name())
+ }
+ body, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ return nil, err
+ }
+
+ var parsedBody extensionSignatureList
+ if err := json.Unmarshal(body, &parsedBody); err != nil {
+ return nil, errors.Wrapf(err, "Error decoding signature list")
+ }
+ return &parsedBody, nil
+}
diff --git a/vendor/github.com/containers/image/docker/docker_image.go b/vendor/github.com/containers/image/docker/docker_image.go
new file mode 100644
index 0000000..744667f
--- /dev/null
+++ b/vendor/github.com/containers/image/docker/docker_image.go
@@ -0,0 +1,107 @@
+package docker
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "net/url"
+ "strings"
+
+ "github.com/containers/image/docker/reference"
+ "github.com/containers/image/image"
+ "github.com/containers/image/types"
+ "github.com/pkg/errors"
+)
+
+// Image is a Docker-specific implementation of types.ImageCloser with a few extra methods
+// which are specific to Docker.
+type Image struct {
+ types.ImageCloser
+ src *dockerImageSource
+}
+
+// newImage returns a new Image interface type after setting up
+// a client to the registry hosting the given image.
+// The caller must call .Close() on the returned Image.
+func newImage(ctx context.Context, sys *types.SystemContext, ref dockerReference) (types.ImageCloser, error) {
+ s, err := newImageSource(ctx, sys, ref)
+ if err != nil {
+ return nil, err
+ }
+ img, err := image.FromSource(ctx, sys, s)
+ if err != nil {
+ return nil, err
+ }
+ return &Image{ImageCloser: img, src: s}, nil
+}
+
+// SourceRefFullName returns a fully expanded name for the repository this image is in.
+func (i *Image) SourceRefFullName() string {
+ return i.src.ref.ref.Name()
+}
+
+// GetRepositoryTags lists all tags available in the repository. The tag
+// provided inside the ImageReference will be ignored. (This is a
+// backward-compatible shim method which calls the module-level
+// GetRepositoryTags)
+func (i *Image) GetRepositoryTags(ctx context.Context) ([]string, error) {
+ return GetRepositoryTags(ctx, i.src.c.sys, i.src.ref)
+}
+
+// GetRepositoryTags lists all tags available in the repository. The tag
+// provided inside the ImageReference will be ignored.
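+// A rough usage sketch (illustrative only; error handling elided; the docker
+// transport's ParseReference expects a leading "//"):
+//
+//	ref, err := docker.ParseReference("//busybox")
+//	tags, err := docker.GetRepositoryTags(ctx, nil, ref)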
+func GetRepositoryTags(ctx context.Context, sys *types.SystemContext, ref types.ImageReference) ([]string, error) { + dr, ok := ref.(dockerReference) + if !ok { + return nil, errors.Errorf("ref must be a dockerReference") + } + + path := fmt.Sprintf(tagsPath, reference.Path(dr.ref)) + client, err := newDockerClientFromRef(sys, dr, false, "pull") + if err != nil { + return nil, errors.Wrap(err, "failed to create client") + } + + tags := make([]string, 0) + + for { + res, err := client.makeRequest(ctx, "GET", path, nil, nil, v2Auth, nil) + if err != nil { + return nil, err + } + defer res.Body.Close() + if res.StatusCode != http.StatusOK { + // print url also + return nil, errors.Errorf("Invalid status code returned when fetching tags list %d (%s)", res.StatusCode, http.StatusText(res.StatusCode)) + } + + var tagsHolder struct { + Tags []string + } + if err = json.NewDecoder(res.Body).Decode(&tagsHolder); err != nil { + return nil, err + } + tags = append(tags, tagsHolder.Tags...) + + link := res.Header.Get("Link") + if link == "" { + break + } + + linkURLStr := strings.Trim(strings.Split(link, ";")[0], "<>") + linkURL, err := url.Parse(linkURLStr) + if err != nil { + return tags, err + } + + // can be relative or absolute, but we only want the path (and I + // guess we're in trouble if it forwards to a new place...) + path = linkURL.Path + if linkURL.RawQuery != "" { + path += "?" + path += linkURL.RawQuery + } + } + return tags, nil +} diff --git a/vendor/github.com/containers/image/docker/docker_image_dest.go b/vendor/github.com/containers/image/docker/docker_image_dest.go new file mode 100644 index 0000000..c116cbe --- /dev/null +++ b/vendor/github.com/containers/image/docker/docker_image_dest.go @@ -0,0 +1,611 @@ +package docker + +import ( + "bytes" + "context" + "crypto/rand" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "path/filepath" + "strings" + + "github.com/containers/image/docker/reference" + "github.com/containers/image/manifest" + "github.com/containers/image/pkg/blobinfocache/none" + "github.com/containers/image/types" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/registry/client" + "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +type dockerImageDestination struct { + ref dockerReference + c *dockerClient + // State + manifestDigest digest.Digest // or "" if not yet known. +} + +// newImageDestination creates a new ImageDestination for the specified image reference. +func newImageDestination(sys *types.SystemContext, ref dockerReference) (types.ImageDestination, error) { + c, err := newDockerClientFromRef(sys, ref, true, "pull,push") + if err != nil { + return nil, err + } + return &dockerImageDestination{ + ref: ref, + c: c, + }, nil +} + +// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, +// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects. +func (d *dockerImageDestination) Reference() types.ImageReference { + return d.ref +} + +// Close removes resources associated with an initialized ImageDestination, if any. 
+func (d *dockerImageDestination) Close() error { + return nil +} + +func (d *dockerImageDestination) SupportedManifestMIMETypes() []string { + return []string{ + imgspecv1.MediaTypeImageManifest, + manifest.DockerV2Schema2MediaType, + manifest.DockerV2Schema1SignedMediaType, + manifest.DockerV2Schema1MediaType, + } +} + +// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures. +// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil. +func (d *dockerImageDestination) SupportsSignatures(ctx context.Context) error { + if err := d.c.detectProperties(ctx); err != nil { + return err + } + switch { + case d.c.signatureBase != nil: + return nil + case d.c.supportsSignatures: + return nil + default: + return errors.Errorf("X-Registry-Supports-Signatures extension not supported, and lookaside is not configured") + } +} + +func (d *dockerImageDestination) DesiredLayerCompression() types.LayerCompression { + return types.Compress +} + +// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually +// uploaded to the image destination, true otherwise. +func (d *dockerImageDestination) AcceptsForeignLayerURLs() bool { + return true +} + +// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise. +func (d *dockerImageDestination) MustMatchRuntimeOS() bool { + return false +} + +// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(), +// and would prefer to receive an unmodified manifest instead of one modified for the destination. +// Does not make a difference if Reference().DockerReference() is nil. +func (d *dockerImageDestination) IgnoresEmbeddedDockerReference() bool { + return false // We do want the manifest updated; older registry versions refuse manifests if the embedded reference does not match. +} + +// sizeCounter is an io.Writer which only counts the total size of its input. +type sizeCounter struct{ size int64 } + +func (c *sizeCounter) Write(p []byte) (n int, err error) { + c.size += int64(len(p)) + return len(p), nil +} + +// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. +func (d *dockerImageDestination) HasThreadSafePutBlob() bool { + return true +} + +// PutBlob writes contents of stream and returns data representing the result (with all data filled in). +// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. +// inputInfo.Size is the expected length of stream, if known. +// May update cache. +// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available +// to any other readers for download using the supplied digest. +// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. +func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { + if inputInfo.Digest.String() != "" { + // This should not really be necessary, at least the copy code calls TryReusingBlob automatically. + // Still, we need to check, if only because the "initiate upload" endpoint does not have a documented "blob already exists" return value. 
+ // But we do that with NoCache, so that it _only_ checks the primary destination, instead of trying all mount candidates _again_. + haveBlob, reusedInfo, err := d.TryReusingBlob(ctx, inputInfo, none.NoCache, false) + if err != nil { + return types.BlobInfo{}, err + } + if haveBlob { + return reusedInfo, nil + } + } + + // FIXME? Chunked upload, progress reporting, etc. + uploadPath := fmt.Sprintf(blobUploadPath, reference.Path(d.ref.ref)) + logrus.Debugf("Uploading %s", uploadPath) + res, err := d.c.makeRequest(ctx, "POST", uploadPath, nil, nil, v2Auth, nil) + if err != nil { + return types.BlobInfo{}, err + } + defer res.Body.Close() + if res.StatusCode != http.StatusAccepted { + logrus.Debugf("Error initiating layer upload, response %#v", *res) + return types.BlobInfo{}, errors.Wrapf(client.HandleErrorResponse(res), "Error initiating layer upload to %s in %s", uploadPath, d.c.registry) + } + uploadLocation, err := res.Location() + if err != nil { + return types.BlobInfo{}, errors.Wrap(err, "Error determining upload URL") + } + + digester := digest.Canonical.Digester() + sizeCounter := &sizeCounter{} + tee := io.TeeReader(stream, io.MultiWriter(digester.Hash(), sizeCounter)) + res, err = d.c.makeRequestToResolvedURL(ctx, "PATCH", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, tee, inputInfo.Size, v2Auth, nil) + if err != nil { + logrus.Debugf("Error uploading layer chunked, response %#v", res) + return types.BlobInfo{}, err + } + defer res.Body.Close() + computedDigest := digester.Digest() + + uploadLocation, err = res.Location() + if err != nil { + return types.BlobInfo{}, errors.Wrap(err, "Error determining upload URL") + } + + // FIXME: DELETE uploadLocation on failure (does not really work in docker/distribution servers, which incorrectly require the "delete" action in the token's scope) + + locationQuery := uploadLocation.Query() + // TODO: check inputInfo.Digest == computedDigest https://github.com/containers/image/pull/70#discussion_r77646717 + locationQuery.Set("digest", computedDigest.String()) + uploadLocation.RawQuery = locationQuery.Encode() + res, err = d.c.makeRequestToResolvedURL(ctx, "PUT", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, nil, -1, v2Auth, nil) + if err != nil { + return types.BlobInfo{}, err + } + defer res.Body.Close() + if res.StatusCode != http.StatusCreated { + logrus.Debugf("Error uploading layer, response %#v", *res) + return types.BlobInfo{}, errors.Wrapf(client.HandleErrorResponse(res), "Error uploading layer to %s", uploadLocation) + } + + logrus.Debugf("Upload of layer %s complete", computedDigest) + cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), computedDigest, newBICLocationReference(d.ref)) + return types.BlobInfo{Digest: computedDigest, Size: sizeCounter.size}, nil +} + +// blobExists returns true iff repo contains a blob with digest, and if so, also its size. +// If the destination does not contain the blob, or it is unknown, blobExists ordinarily returns (false, -1, nil); +// it returns a non-nil error only on an unexpected failure. 
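+// Concretely, this issues a request of the form (cf. blobsPath):
+//	HEAD /v2/<repository>/blobs/sha256:<hex>
+// and maps 200 to "exists", 404 to "not present".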
+func (d *dockerImageDestination) blobExists(ctx context.Context, repo reference.Named, digest digest.Digest, extraScope *authScope) (bool, int64, error) {
+ checkPath := fmt.Sprintf(blobsPath, reference.Path(repo), digest.String())
+ logrus.Debugf("Checking %s", checkPath)
+ res, err := d.c.makeRequest(ctx, "HEAD", checkPath, nil, nil, v2Auth, extraScope)
+ if err != nil {
+ return false, -1, err
+ }
+ defer res.Body.Close()
+ switch res.StatusCode {
+ case http.StatusOK:
+ logrus.Debugf("... already exists")
+ return true, getBlobSize(res), nil
+ case http.StatusUnauthorized:
+ logrus.Debugf("... not authorized")
+ return false, -1, errors.Wrapf(client.HandleErrorResponse(res), "Error checking whether a blob %s exists in %s", digest, repo.Name())
+ case http.StatusNotFound:
+ logrus.Debugf("... not present")
+ return false, -1, nil
+ default:
+ return false, -1, errors.Errorf("failed to read from destination repository %s: %d (%s)", reference.Path(d.ref.ref), res.StatusCode, http.StatusText(res.StatusCode))
+ }
+}
+
+// mountBlob tries to mount blob srcDigest from srcRepo to the current destination.
+func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo reference.Named, srcDigest digest.Digest, extraScope *authScope) error {
+ u := url.URL{
+ Path: fmt.Sprintf(blobUploadPath, reference.Path(d.ref.ref)),
+ RawQuery: url.Values{
+ "mount": {srcDigest.String()},
+ "from": {reference.Path(srcRepo)},
+ }.Encode(),
+ }
+ mountPath := u.String()
+ logrus.Debugf("Trying to mount %s", mountPath)
+ res, err := d.c.makeRequest(ctx, "POST", mountPath, nil, nil, v2Auth, extraScope)
+ if err != nil {
+ return err
+ }
+ defer res.Body.Close()
+ switch res.StatusCode {
+ case http.StatusCreated:
+ logrus.Debugf("... mount OK")
+ return nil
+ case http.StatusAccepted:
+ // Oops, the mount was ignored - either the registry does not support that yet, or the blob does not exist; the registry has started an ordinary upload process.
+ // Abort, and let the ultimate caller do an upload when it's ready, instead.
+ // NOTE: This does not really work in docker/distribution servers, which incorrectly require the "delete" action in the token's scope, and is thus entirely untested.
+ uploadLocation, err := res.Location()
+ if err != nil {
+ return errors.Wrap(err, "Error determining upload URL after a mount attempt")
+ }
+ logrus.Debugf("... started an upload instead of mounting, trying to cancel at %s", uploadLocation.String())
+ res2, err := d.c.makeRequestToResolvedURL(ctx, "DELETE", uploadLocation.String(), nil, nil, -1, v2Auth, extraScope)
+ if err != nil {
+ logrus.Debugf("Error trying to cancel an inadvertent upload: %s", err)
+ } else {
+ defer res2.Body.Close()
+ if res2.StatusCode != http.StatusNoContent {
+ logrus.Debugf("Error trying to cancel an inadvertent upload, status %s", http.StatusText(res2.StatusCode))
+ }
+ }
+ // Anyway, if canceling the upload fails, ignore it and return the more important error:
+ return fmt.Errorf("Mounting %s from %s to %s started an upload instead", srcDigest, srcRepo.Name(), d.ref.ref.Name())
+ default:
+ logrus.Debugf("Error mounting, response %#v", *res)
+ return errors.Wrapf(client.HandleErrorResponse(res), "Error mounting %s from %s to %s", srcDigest, srcRepo.Name(), d.ref.ref.Name())
+ }
+}
+
+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
+func (d *dockerImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
+ if info.Digest == "" {
+ return false, types.BlobInfo{}, errors.Errorf(`Can not check for a blob with unknown digest`)
+ }
+
+ // First, check whether the blob happens to already exist at the destination.
+ exists, size, err := d.blobExists(ctx, d.ref.ref, info.Digest, nil)
+ if err != nil {
+ return false, types.BlobInfo{}, err
+ }
+ if exists {
+ cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, newBICLocationReference(d.ref))
+ return true, types.BlobInfo{Digest: info.Digest, Size: size}, nil
+ }
+
+ // Then try reusing blobs from other locations.
+ for _, candidate := range cache.CandidateLocations(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, canSubstitute) {
+ candidateRepo, err := parseBICLocationReference(candidate.Location)
+ if err != nil {
+ logrus.Debugf("Error parsing BlobInfoCache location reference: %s", err)
+ continue
+ }
+ logrus.Debugf("Trying to reuse cached location %s in %s", candidate.Digest.String(), candidateRepo.Name())
+
+ // Sanity checks:
+ if reference.Domain(candidateRepo) != reference.Domain(d.ref.ref) {
+ logrus.Debugf("... Internal error: domain %s does not match destination %s", reference.Domain(candidateRepo), reference.Domain(d.ref.ref))
+ continue
+ }
+ if candidateRepo.Name() == d.ref.ref.Name() && candidate.Digest == info.Digest {
+ logrus.Debug("... Already tried the primary destination")
+ continue
+ }
+
+ // Whatever happens here, don't abort the entire operation. It's likely we just don't have permissions, and if it is a critical network error, we will find out soon enough anyway.
+
+ // Checking candidateRepo, and mounting from it, requires an
+ // expanded token scope.
+ extraScope := &authScope{
+ remoteName: reference.Path(candidateRepo),
+ actions: "pull",
+ }
+ // This existence check is not, strictly speaking, necessary: We only _really_ need it to get the blob size, and we could record that in the cache instead.
+ // But a "failed" d.mountBlob currently leaves around an unterminated server-side upload, which we would try to cancel.
+ // So, without this existence check, it would be 1 request on success, 2 requests on failure; with it, it is 2 requests on success, 1 request on failure.
+ // On success we avoid the actual costly upload; so, in a sense, the success case is "free", but failures are always costly.
+ // Even worse, docker/distribution does not actually reasonably implement canceling uploads
+ // (it would require a "delete" action in the token, and Quay does not give that to anyone, so we can't ask);
+ // so, be a nice client and don't create unnecessary upload sessions on the server.
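+ // For reference, the cross-repository mount attempted by d.mountBlob below
+ // is, schematically:
+ //   POST /v2/<destination repository>/blobs/uploads/?mount=sha256:<hex>&from=<candidate repository>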
+ exists, size, err := d.blobExists(ctx, candidateRepo, candidate.Digest, extraScope)
+ if err != nil {
+ logrus.Debugf("... Failed: %v", err)
+ continue
+ }
+ if !exists {
+ // FIXME? Should we drop the blob from cache here (and elsewhere?)?
+ continue // logrus.Debug() already happened in blobExists
+ }
+ if candidateRepo.Name() != d.ref.ref.Name() {
+ if err := d.mountBlob(ctx, candidateRepo, candidate.Digest, extraScope); err != nil {
+ logrus.Debugf("... Mount failed: %v", err)
+ continue
+ }
+ }
+ cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), candidate.Digest, newBICLocationReference(d.ref))
+ return true, types.BlobInfo{Digest: candidate.Digest, Size: size}, nil
+ }
+
+ return false, types.BlobInfo{}, nil
+}
+
+// PutManifest writes manifest to the destination.
+// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
+// If the destination is in principle available but refuses this manifest type (e.g. it does not recognize the schema)
+// while possibly accepting a different manifest type, the returned error must be a ManifestTypeRejectedError.
+func (d *dockerImageDestination) PutManifest(ctx context.Context, m []byte) error {
+ digest, err := manifest.Digest(m)
+ if err != nil {
+ return err
+ }
+ d.manifestDigest = digest
+
+ refTail, err := d.ref.tagOrDigest()
+ if err != nil {
+ return err
+ }
+ path := fmt.Sprintf(manifestPath, reference.Path(d.ref.ref), refTail)
+
+ headers := map[string][]string{}
+ mimeType := manifest.GuessMIMEType(m)
+ if mimeType != "" {
+ headers["Content-Type"] = []string{mimeType}
+ }
+ res, err := d.c.makeRequest(ctx, "PUT", path, headers, bytes.NewReader(m), v2Auth, nil)
+ if err != nil {
+ return err
+ }
+ defer res.Body.Close()
+ if !successStatus(res.StatusCode) {
+ err = errors.Wrapf(client.HandleErrorResponse(res), "Error uploading manifest %s to %s", refTail, d.ref.ref.Name())
+ if isManifestInvalidError(errors.Cause(err)) {
+ err = types.ManifestTypeRejectedError{Err: err}
+ }
+ return err
+ }
+ return nil
+}
+
+// successStatus returns true if the argument is a successful HTTP response
+// code (in the range 200 - 399 inclusive).
+func successStatus(status int) bool {
+ return status >= 200 && status <= 399
+}
+
+// isManifestInvalidError returns true iff err from client.HandleErrorResponse is a “manifest invalid” error.
+func isManifestInvalidError(err error) bool {
+ errors, ok := err.(errcode.Errors)
+ if !ok || len(errors) == 0 {
+ return false
+ }
+ err = errors[0]
+ ec, ok := err.(errcode.ErrorCoder)
+ if !ok {
+ return false
+ }
+
+ switch ec.ErrorCode() {
+ // ErrorCodeManifestInvalid is returned by OpenShift with acceptschema2=false.
+ case v2.ErrorCodeManifestInvalid:
+ return true
+ // ErrorCodeTagInvalid is returned by docker/distribution (at least as of commit ec87e9b6971d831f0eff752ddb54fb64693e51cd)
+ // when uploading to a tag (because it can’t find a matching tag inside the manifest)
+ case v2.ErrorCodeTagInvalid:
+ return true
+ // ErrorCodeUnsupported with 'Invalid JSON syntax' is returned by AWS ECR when
+ // uploading an OCI manifest that is (correctly, according to the spec) missing
+ // a top-level media type. 
See libpod issue #1719 + // FIXME: remove this case when ECR behavior is fixed + case errcode.ErrorCodeUnsupported: + return strings.Contains(err.Error(), "Invalid JSON syntax") + default: + return false + } +} + +func (d *dockerImageDestination) PutSignatures(ctx context.Context, signatures [][]byte) error { + // Do not fail if we don’t really need to support signatures. + if len(signatures) == 0 { + return nil + } + if err := d.c.detectProperties(ctx); err != nil { + return err + } + switch { + case d.c.signatureBase != nil: + return d.putSignaturesToLookaside(signatures) + case d.c.supportsSignatures: + return d.putSignaturesToAPIExtension(ctx, signatures) + default: + return errors.Errorf("X-Registry-Supports-Signatures extension not supported, and lookaside is not configured") + } +} + +// putSignaturesToLookaside implements PutSignatures() from the lookaside location configured in s.c.signatureBase, +// which is not nil. +func (d *dockerImageDestination) putSignaturesToLookaside(signatures [][]byte) error { + // FIXME? This overwrites files one at a time, definitely not atomic. + // A failure when updating signatures with a reordered copy could lose some of them. + + // Skip dealing with the manifest digest if not necessary. + if len(signatures) == 0 { + return nil + } + + if d.manifestDigest.String() == "" { + // This shouldn’t happen, ImageDestination users are required to call PutManifest before PutSignatures + return errors.Errorf("Unknown manifest digest, can't add signatures") + } + + // NOTE: Keep this in sync with docs/signature-protocols.md! + for i, signature := range signatures { + url := signatureStorageURL(d.c.signatureBase, d.manifestDigest, i) + if url == nil { + return errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil") + } + err := d.putOneSignature(url, signature) + if err != nil { + return err + } + } + // Remove any other signatures, if present. + // We stop at the first missing signature; if a previous deleting loop aborted + // prematurely, this may not clean up all of them, but one missing signature + // is enough for dockerImageSource to stop looking for other signatures, so that + // is sufficient. + for i := len(signatures); ; i++ { + url := signatureStorageURL(d.c.signatureBase, d.manifestDigest, i) + if url == nil { + return errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil") + } + missing, err := d.c.deleteOneSignature(url) + if err != nil { + return err + } + if missing { + break + } + } + + return nil +} + +// putOneSignature stores one signature to url. +// NOTE: Keep this in sync with docs/signature-protocols.md! +func (d *dockerImageDestination) putOneSignature(url *url.URL, signature []byte) error { + switch url.Scheme { + case "file": + logrus.Debugf("Writing to %s", url.Path) + err := os.MkdirAll(filepath.Dir(url.Path), 0755) + if err != nil { + return err + } + err = ioutil.WriteFile(url.Path, signature, 0644) + if err != nil { + return err + } + return nil + + case "http", "https": + return errors.Errorf("Writing directly to a %s sigstore %s is not supported. Configure a sigstore-staging: location", url.Scheme, url.String()) + default: + return errors.Errorf("Unsupported scheme when writing signature to %s", url.String()) + } +} + +// deleteOneSignature deletes a signature from url, if it exists. +// If it successfully determines that the signature does not exist, returns (true, nil) +// NOTE: Keep this in sync with docs/signature-protocols.md! 
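+// For illustration, with a file: base the per-image signatures live at paths like
+// (an assumed example layout, not taken from this code):
+//
+//   <base>/library/busybox@sha256=<manifest-digest-hex>/signature-1
+//   <base>/library/busybox@sha256=<manifest-digest-hex>/signature-2
+//
+// so deleting signature i simply removes the matching signature-<i> file; see
+// signatureStorageURL in lookaside.go for how these URLs are derived.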
+func (c *dockerClient) deleteOneSignature(url *url.URL) (missing bool, err error) {
+ switch url.Scheme {
+ case "file":
+ logrus.Debugf("Deleting %s", url.Path)
+ err := os.Remove(url.Path)
+ if err != nil && os.IsNotExist(err) {
+ return true, nil
+ }
+ return false, err
+
+ case "http", "https":
+ return false, errors.Errorf("Writing directly to a %s sigstore %s is not supported. Configure a sigstore-staging: location", url.Scheme, url.String())
+ default:
+ return false, errors.Errorf("Unsupported scheme when deleting signature from %s", url.String())
+ }
+}
+
+// putSignaturesToAPIExtension implements PutSignatures() using the X-Registry-Supports-Signatures API extension.
+func (d *dockerImageDestination) putSignaturesToAPIExtension(ctx context.Context, signatures [][]byte) error {
+ // Skip dealing with the manifest digest, or reading the old state, if not necessary.
+ if len(signatures) == 0 {
+ return nil
+ }
+
+ if d.manifestDigest.String() == "" {
+ // This shouldn’t happen, ImageDestination users are required to call PutManifest before PutSignatures
+ return errors.Errorf("Unknown manifest digest, can't add signatures")
+ }
+
+ // Because image signatures are a shared resource in Atomic Registry, the default upload
+ // always adds signatures. Eventually we should also allow removing signatures,
+ // but the X-Registry-Supports-Signatures API extension does not support that yet.
+
+ existingSignatures, err := d.c.getExtensionsSignatures(ctx, d.ref, d.manifestDigest)
+ if err != nil {
+ return err
+ }
+ existingSigNames := map[string]struct{}{}
+ for _, sig := range existingSignatures.Signatures {
+ existingSigNames[sig.Name] = struct{}{}
+ }
+
+sigExists:
+ for _, newSig := range signatures {
+ for _, existingSig := range existingSignatures.Signatures {
+ if existingSig.Version == extensionSignatureSchemaVersion && existingSig.Type == extensionSignatureTypeAtomic && bytes.Equal(existingSig.Content, newSig) {
+ continue sigExists
+ }
+ }
+
+ // The API expects us to invent a new unique name. This is racy, but hopefully good enough.
+ var signatureName string
+ for {
+ randBytes := make([]byte, 16)
+ n, err := rand.Read(randBytes)
+ if err != nil || n != 16 {
+ return errors.Wrapf(err, "Error generating random signature name (read %d bytes)", n)
+ }
+ signatureName = fmt.Sprintf("%s@%032x", d.manifestDigest.String(), randBytes)
+ if _, ok := existingSigNames[signatureName]; !ok {
+ break
+ }
+ }
+ sig := extensionSignature{
+ Version: extensionSignatureSchemaVersion,
+ Name: signatureName,
+ Type: extensionSignatureTypeAtomic,
+ Content: newSig,
+ }
+ body, err := json.Marshal(sig)
+ if err != nil {
+ return err
+ }
+
+ path := fmt.Sprintf(extensionsSignaturePath, reference.Path(d.ref.ref), d.manifestDigest.String())
+ res, err := d.c.makeRequest(ctx, "PUT", path, nil, bytes.NewReader(body), v2Auth, nil)
+ if err != nil {
+ return err
+ }
+ defer res.Body.Close()
+ if res.StatusCode != http.StatusCreated {
+ body, err := ioutil.ReadAll(res.Body)
+ if err == nil {
+ logrus.Debugf("Error body %s", string(body))
+ }
+ logrus.Debugf("Error uploading signature, status %d, %#v", res.StatusCode, res)
+ return errors.Wrapf(client.HandleErrorResponse(res), "Error uploading signature to %s in %s", path, d.c.registry)
+ }
+ }
+
+ return nil
+}
+
+// Commit marks the process of storing the image as successful and asks for the image to be persisted. 
+// WARNING: This does not have any transactional semantics:
+// - Uploaded data MAY be visible to others before Commit() is called
+// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
+func (d *dockerImageDestination) Commit(ctx context.Context) error {
+ return nil
+}
diff --git a/vendor/github.com/containers/image/docker/docker_image_src.go b/vendor/github.com/containers/image/docker/docker_image_src.go
new file mode 100644
index 0000000..c43e6e7
--- /dev/null
+++ b/vendor/github.com/containers/image/docker/docker_image_src.go
@@ -0,0 +1,452 @@
+package docker
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "mime"
+ "net/http"
+ "net/url"
+ "os"
+ "strconv"
+
+ "github.com/containers/image/docker/reference"
+ "github.com/containers/image/manifest"
+ "github.com/containers/image/pkg/sysregistriesv2"
+ "github.com/containers/image/types"
+ "github.com/docker/distribution/registry/client"
+ digest "github.com/opencontainers/go-digest"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+type dockerImageSource struct {
+ ref dockerReference
+ c *dockerClient
+ // State
+ cachedManifest []byte // nil if not loaded yet
+ cachedManifestMIMEType string // Only valid if cachedManifest != nil
+}
+
+// newImageSource creates a new ImageSource for the specified image reference.
+// The caller must call .Close() on the returned ImageSource.
+func newImageSource(ctx context.Context, sys *types.SystemContext, ref dockerReference) (*dockerImageSource, error) {
+ registry, err := sysregistriesv2.FindRegistry(sys, ref.ref.Name())
+ if err != nil {
+ return nil, errors.Wrapf(err, "error loading registries configuration")
+ }
+ if registry == nil {
+ // No configuration was found for the provided reference, so use the
+ // equivalent of a default configuration.
+ registry = &sysregistriesv2.Registry{
+ Endpoint: sysregistriesv2.Endpoint{
+ Location: ref.ref.String(),
+ },
+ Prefix: ref.ref.String(),
+ }
+ }
+
+ primaryDomain := reference.Domain(ref.ref)
+ // Check all endpoints for the manifest availability. If we find one that does
+ // contain the image, it will be used for all future pull actions. Always try the
+ // non-mirror original location last; this both transparently handles the case
+ // of no mirrors configured, and ensures we return the error encountered when
+ // accessing the upstream location if all endpoints fail.
+ manifestLoadErr := errors.New("Internal error: newImageSource returned without trying any endpoint")
+ pullSources, err := registry.PullSourcesFromReference(ref.ref)
+ if err != nil {
+ return nil, err
+ }
+ for _, pullSource := range pullSources {
+ logrus.Debugf("Trying to pull %q", pullSource.Reference)
+ dockerRef, err := newReference(pullSource.Reference)
+ if err != nil {
+ return nil, err
+ }
+
+ endpointSys := sys
+ // sys.DockerAuthConfig does not explicitly specify a registry; we must not blindly send the credentials intended for the primary endpoint to mirrors. 
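+ // For example (an illustrative scenario, not taken from this code): if the user
+ // supplied credentials for docker.io and a mirror at mirror.example.com is tried
+ // first, the copy of the SystemContext made below has DockerAuthConfig cleared,
+ // so the docker.io credentials are presented only to docker.io itself.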
+ if endpointSys != nil && endpointSys.DockerAuthConfig != nil && reference.Domain(dockerRef.ref) != primaryDomain {
+ copy := *endpointSys
+ copy.DockerAuthConfig = nil
+ endpointSys = &copy
+ }
+
+ client, err := newDockerClientFromRef(endpointSys, dockerRef, false, "pull")
+ if err != nil {
+ return nil, err
+ }
+ client.tlsClientConfig.InsecureSkipVerify = pullSource.Endpoint.Insecure
+
+ testImageSource := &dockerImageSource{
+ ref: dockerRef,
+ c: client,
+ }
+
+ manifestLoadErr = testImageSource.ensureManifestIsLoaded(ctx)
+ if manifestLoadErr == nil {
+ return testImageSource, nil
+ }
+ }
+ return nil, manifestLoadErr
+}
+
+// Reference returns the reference used to set up this source, _as specified by the user_
+// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
+func (s *dockerImageSource) Reference() types.ImageReference {
+ return s.ref
+}
+
+// Close removes resources associated with an initialized ImageSource, if any.
+func (s *dockerImageSource) Close() error {
+ return nil
+}
+
+// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified.
+func (s *dockerImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) {
+ return nil, nil
+}
+
+// simplifyContentType drops parameters from a HTTP media type (see https://tools.ietf.org/html/rfc7231#section-3.1.1.1)
+// Alternatively, an empty string is returned unchanged, and invalid values are "simplified" to an empty string.
+func simplifyContentType(contentType string) string {
+ if contentType == "" {
+ return contentType
+ }
+ mimeType, _, err := mime.ParseMediaType(contentType)
+ if err != nil {
+ return ""
+ }
+ return mimeType
+}
+
+// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
+// It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
+// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). 
+func (s *dockerImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { + if instanceDigest != nil { + return s.fetchManifest(ctx, instanceDigest.String()) + } + err := s.ensureManifestIsLoaded(ctx) + if err != nil { + return nil, "", err + } + return s.cachedManifest, s.cachedManifestMIMEType, nil +} + +func (s *dockerImageSource) fetchManifest(ctx context.Context, tagOrDigest string) ([]byte, string, error) { + path := fmt.Sprintf(manifestPath, reference.Path(s.ref.ref), tagOrDigest) + headers := make(map[string][]string) + headers["Accept"] = manifest.DefaultRequestedManifestMIMETypes + res, err := s.c.makeRequest(ctx, "GET", path, headers, nil, v2Auth, nil) + if err != nil { + return nil, "", err + } + defer res.Body.Close() + if res.StatusCode != http.StatusOK { + return nil, "", errors.Wrapf(client.HandleErrorResponse(res), "Error reading manifest %s in %s", tagOrDigest, s.ref.ref.Name()) + } + manblob, err := ioutil.ReadAll(res.Body) + if err != nil { + return nil, "", err + } + return manblob, simplifyContentType(res.Header.Get("Content-Type")), nil +} + +// ensureManifestIsLoaded sets s.cachedManifest and s.cachedManifestMIMEType +// +// ImageSource implementations are not required or expected to do any caching, +// but because our signatures are “attached” to the manifest digest, +// we need to ensure that the digest of the manifest returned by GetManifest(ctx, nil) +// and used by GetSignatures(ctx, nil) are consistent, otherwise we would get spurious +// signature verification failures when pulling while a tag is being updated. +func (s *dockerImageSource) ensureManifestIsLoaded(ctx context.Context) error { + if s.cachedManifest != nil { + return nil + } + + reference, err := s.ref.tagOrDigest() + if err != nil { + return err + } + + manblob, mt, err := s.fetchManifest(ctx, reference) + if err != nil { + return err + } + // We might validate manblob against the Docker-Content-Digest header here to protect against transport errors. + s.cachedManifest = manblob + s.cachedManifestMIMEType = mt + return nil +} + +func (s *dockerImageSource) getExternalBlob(ctx context.Context, urls []string) (io.ReadCloser, int64, error) { + var ( + resp *http.Response + err error + ) + for _, url := range urls { + resp, err = s.c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth, nil) + if err == nil { + if resp.StatusCode != http.StatusOK { + err = errors.Errorf("error fetching external blob from %q: %d (%s)", url, resp.StatusCode, http.StatusText(resp.StatusCode)) + logrus.Debug(err) + continue + } + break + } + } + if err != nil { + return nil, 0, err + } + return resp.Body, getBlobSize(resp), nil +} + +func getBlobSize(resp *http.Response) int64 { + size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64) + if err != nil { + size = -1 + } + return size +} + +// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. +func (s *dockerImageSource) HasThreadSafeGetBlob() bool { + return true +} + +// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). +// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. +// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. 
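+// For illustration, the request issued for a registry-hosted layer is roughly
+// (path format only, per the blobsPath template):
+//
+//   GET /v2/<repository>/blobs/sha256:<hex>
+//
+// and the response body is handed back to the caller as a stream rather than
+// buffered here.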
+func (s *dockerImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
+ if len(info.URLs) != 0 {
+ return s.getExternalBlob(ctx, info.URLs)
+ }
+
+ path := fmt.Sprintf(blobsPath, reference.Path(s.ref.ref), info.Digest.String())
+ logrus.Debugf("Downloading %s", path)
+ res, err := s.c.makeRequest(ctx, "GET", path, nil, nil, v2Auth, nil)
+ if err != nil {
+ return nil, 0, err
+ }
+ if res.StatusCode != http.StatusOK {
+ return nil, 0, errors.Errorf("Invalid status code %d (%s) returned when fetching blob at %s", res.StatusCode, http.StatusText(res.StatusCode), path)
+ }
+ cache.RecordKnownLocation(s.ref.Transport(), bicTransportScope(s.ref), info.Digest, newBICLocationReference(s.ref))
+ return res.Body, getBlobSize(res), nil
+}
+
+// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+func (s *dockerImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
+ if err := s.c.detectProperties(ctx); err != nil {
+ return nil, err
+ }
+ switch {
+ case s.c.signatureBase != nil:
+ return s.getSignaturesFromLookaside(ctx, instanceDigest)
+ case s.c.supportsSignatures:
+ return s.getSignaturesFromAPIExtension(ctx, instanceDigest)
+ default:
+ return [][]byte{}, nil
+ }
+}
+
+// manifestDigest returns a digest of the manifest, from instanceDigest if non-nil; or from the supplied reference,
+// or finally, from a fetched manifest.
+func (s *dockerImageSource) manifestDigest(ctx context.Context, instanceDigest *digest.Digest) (digest.Digest, error) {
+ if instanceDigest != nil {
+ return *instanceDigest, nil
+ }
+ if digested, ok := s.ref.ref.(reference.Digested); ok {
+ d := digested.Digest()
+ if d.Algorithm() == digest.Canonical {
+ return d, nil
+ }
+ }
+ if err := s.ensureManifestIsLoaded(ctx); err != nil {
+ return "", err
+ }
+ return manifest.Digest(s.cachedManifest)
+}
+
+// getSignaturesFromLookaside implements GetSignatures() from the lookaside location configured in s.c.signatureBase,
+// which is not nil.
+func (s *dockerImageSource) getSignaturesFromLookaside(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
+ manifestDigest, err := s.manifestDigest(ctx, instanceDigest)
+ if err != nil {
+ return nil, err
+ }
+
+ // NOTE: Keep this in sync with docs/signature-protocols.md!
+ signatures := [][]byte{}
+ for i := 0; ; i++ {
+ url := signatureStorageURL(s.c.signatureBase, manifestDigest, i)
+ if url == nil {
+ return nil, errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil")
+ }
+ signature, missing, err := s.getOneSignature(ctx, url)
+ if err != nil {
+ return nil, err
+ }
+ if missing {
+ break
+ }
+ signatures = append(signatures, signature)
+ }
+ return signatures, nil
+}
+
+// getOneSignature downloads one signature from url.
+// If it successfully determines that the signature does not exist, returns with missing set to true and error set to nil.
+// NOTE: Keep this in sync with docs/signature-protocols.md! 
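+// For illustration, with an https base reading signatures is a sequence of
+//
+//   GET <base>/<repository>@sha256=<manifest-digest-hex>/signature-1
+//   GET <base>/<repository>@sha256=<manifest-digest-hex>/signature-2
+//   ...
+//
+// stopping at the first 404, which marks the end of the list (a sketch of the
+// lookaside protocol as used by getSignaturesFromLookaside above).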
+func (s *dockerImageSource) getOneSignature(ctx context.Context, url *url.URL) (signature []byte, missing bool, err error) { + switch url.Scheme { + case "file": + logrus.Debugf("Reading %s", url.Path) + sig, err := ioutil.ReadFile(url.Path) + if err != nil { + if os.IsNotExist(err) { + return nil, true, nil + } + return nil, false, err + } + return sig, false, nil + + case "http", "https": + logrus.Debugf("GET %s", url) + req, err := http.NewRequest("GET", url.String(), nil) + if err != nil { + return nil, false, err + } + req = req.WithContext(ctx) + res, err := s.c.client.Do(req) + if err != nil { + return nil, false, err + } + defer res.Body.Close() + if res.StatusCode == http.StatusNotFound { + return nil, true, nil + } else if res.StatusCode != http.StatusOK { + return nil, false, errors.Errorf("Error reading signature from %s: status %d (%s)", url.String(), res.StatusCode, http.StatusText(res.StatusCode)) + } + sig, err := ioutil.ReadAll(res.Body) + if err != nil { + return nil, false, err + } + return sig, false, nil + + default: + return nil, false, errors.Errorf("Unsupported scheme when reading signature from %s", url.String()) + } +} + +// getSignaturesFromAPIExtension implements GetSignatures() using the X-Registry-Supports-Signatures API extension. +func (s *dockerImageSource) getSignaturesFromAPIExtension(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { + manifestDigest, err := s.manifestDigest(ctx, instanceDigest) + if err != nil { + return nil, err + } + + parsedBody, err := s.c.getExtensionsSignatures(ctx, s.ref, manifestDigest) + if err != nil { + return nil, err + } + + var sigs [][]byte + for _, sig := range parsedBody.Signatures { + if sig.Version == extensionSignatureSchemaVersion && sig.Type == extensionSignatureTypeAtomic { + sigs = append(sigs, sig.Content) + } + } + return sigs, nil +} + +// deleteImage deletes the named image from the registry, if supported. +func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerReference) error { + // docker/distribution does not document what action should be used for deleting images. + // + // Current docker/distribution requires "pull" for reading the manifest and "delete" for deleting it. + // quay.io requires "push" (an explicit "pull" is unnecessary), does not grant any token (fails parsing the request) if "delete" is included. + // OpenShift ignores the action string (both the password and the token is an OpenShift API token identifying a user). + // + // We have to hard-code a single string, luckily both docker/distribution and quay.io support "*" to mean "everything". + c, err := newDockerClientFromRef(sys, ref, true, "*") + if err != nil { + return err + } + + // When retrieving the digest from a registry >= 2.3 use the following header: + // "Accept": "application/vnd.docker.distribution.manifest.v2+json" + headers := make(map[string][]string) + headers["Accept"] = []string{manifest.DockerV2Schema2MediaType} + + refTail, err := ref.tagOrDigest() + if err != nil { + return err + } + getPath := fmt.Sprintf(manifestPath, reference.Path(ref.ref), refTail) + get, err := c.makeRequest(ctx, "GET", getPath, headers, nil, v2Auth, nil) + if err != nil { + return err + } + defer get.Body.Close() + manifestBody, err := ioutil.ReadAll(get.Body) + if err != nil { + return err + } + switch get.StatusCode { + case http.StatusOK: + case http.StatusNotFound: + return errors.Errorf("Unable to delete %v. 
Image may not exist or is not stored with a v2 Schema in a v2 registry", ref.ref)
+ default:
+ return errors.Errorf("Failed to delete %v: %s (%v)", ref.ref, manifestBody, get.Status)
+ }
+
+ digest := get.Header.Get("Docker-Content-Digest")
+ deletePath := fmt.Sprintf(manifestPath, reference.Path(ref.ref), digest)
+
+ // When retrieving the digest from a registry >= 2.3 use the following header:
+ //   "Accept": "application/vnd.docker.distribution.manifest.v2+json"
+ delete, err := c.makeRequest(ctx, "DELETE", deletePath, headers, nil, v2Auth, nil)
+ if err != nil {
+ return err
+ }
+ defer delete.Body.Close()
+
+ body, err := ioutil.ReadAll(delete.Body)
+ if err != nil {
+ return err
+ }
+ if delete.StatusCode != http.StatusAccepted {
+ return errors.Errorf("Failed to delete %v: %s (%v)", deletePath, string(body), delete.Status)
+ }
+
+ if c.signatureBase != nil {
+ manifestDigest, err := manifest.Digest(manifestBody)
+ if err != nil {
+ return err
+ }
+
+ for i := 0; ; i++ {
+ url := signatureStorageURL(c.signatureBase, manifestDigest, i)
+ if url == nil {
+ return errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil")
+ }
+ missing, err := c.deleteOneSignature(url)
+ if err != nil {
+ return err
+ }
+ if missing {
+ break
+ }
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/containers/image/docker/docker_transport.go b/vendor/github.com/containers/image/docker/docker_transport.go
new file mode 100644
index 0000000..45da7c9
--- /dev/null
+++ b/vendor/github.com/containers/image/docker/docker_transport.go
@@ -0,0 +1,168 @@
+package docker
+
+import (
+ "context"
+ "fmt"
+ "strings"
+
+ "github.com/containers/image/docker/policyconfiguration"
+ "github.com/containers/image/docker/reference"
+ "github.com/containers/image/transports"
+ "github.com/containers/image/types"
+ "github.com/pkg/errors"
+)
+
+func init() {
+ transports.Register(Transport)
+}
+
+// Transport is an ImageTransport for Docker registry-hosted images.
+var Transport = dockerTransport{}
+
+type dockerTransport struct{}
+
+func (t dockerTransport) Name() string {
+ return "docker"
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
+func (t dockerTransport) ParseReference(reference string) (types.ImageReference, error) {
+ return ParseReference(reference)
+}
+
+// ValidatePolicyConfigurationScope checks that scope is a valid name for signature.PolicyTransportScopes keys
+// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
+// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion.
+// scope passed to this function will not be "", that value is always allowed.
+func (t dockerTransport) ValidatePolicyConfigurationScope(scope string) error {
+ // FIXME? We could be verifying the various character set and length restrictions
+ // from docker/distribution/reference.regexp.go, but other than that there
+ // are few semantically invalid strings.
+ return nil
+}
+
+// dockerReference is an ImageReference for Docker images.
+type dockerReference struct {
+ ref reference.Named // By construction we know that !reference.IsNameOnly(ref)
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into a Docker ImageReference. 
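+// For example (illustrative):
+//
+//   ParseReference("//busybox")           -> docker.io/library/busybox:latest
+//   ParseReference("//quay.io/ns/app:v1") -> quay.io/ns/app:v1
+//   ParseReference("busybox")             -> error (missing the "//" prefix)
+//
+// The "latest" tag is supplied by reference.TagNameOnly when neither a tag nor a
+// digest is present.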
+func ParseReference(refString string) (types.ImageReference, error) { + if !strings.HasPrefix(refString, "//") { + return nil, errors.Errorf("docker: image reference %s does not start with //", refString) + } + ref, err := reference.ParseNormalizedNamed(strings.TrimPrefix(refString, "//")) + if err != nil { + return nil, err + } + ref = reference.TagNameOnly(ref) + return NewReference(ref) +} + +// NewReference returns a Docker reference for a named reference. The reference must satisfy !reference.IsNameOnly(). +func NewReference(ref reference.Named) (types.ImageReference, error) { + return newReference(ref) +} + +// newReference returns a dockerReference for a named reference. +func newReference(ref reference.Named) (dockerReference, error) { + if reference.IsNameOnly(ref) { + return dockerReference{}, errors.Errorf("Docker reference %s has neither a tag nor a digest", reference.FamiliarString(ref)) + } + // A github.com/distribution/reference value can have a tag and a digest at the same time! + // The docker/distribution API does not really support that (we can’t ask for an image with a specific + // tag and digest), so fail. This MAY be accepted in the future. + // (Even if it were supported, the semantics of policy namespaces are unclear - should we drop + // the tag or the digest first?) + _, isTagged := ref.(reference.NamedTagged) + _, isDigested := ref.(reference.Canonical) + if isTagged && isDigested { + return dockerReference{}, errors.Errorf("Docker references with both a tag and digest are currently not supported") + } + + return dockerReference{ + ref: ref, + }, nil +} + +func (ref dockerReference) Transport() types.ImageTransport { + return Transport +} + +// StringWithinTransport returns a string representation of the reference, which MUST be such that +// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. +// NOTE: The returned string is not promised to be equal to the original input to ParseReference; +// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. +// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix. +func (ref dockerReference) StringWithinTransport() string { + return "//" + reference.FamiliarString(ref.ref) +} + +// DockerReference returns a Docker reference associated with this reference +// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, +// not e.g. after redirect or alias processing), or nil if unknown/not applicable. +func (ref dockerReference) DockerReference() reference.Named { + return ref.ref +} + +// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. +// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases; +// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical +// (i.e. various references with exactly the same semantics should return the same configuration identity) +// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but +// not required/guaranteed that it will be a valid input to Transport().ParseReference(). +// Returns "" if configuration identities for these references are not supported. 
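+// For example (illustrative): for //busybox the identity is
+// "docker.io/library/busybox:latest", and for a digest-pinned image it is
+// "docker.io/library/busybox@sha256:<hex>"; see policyconfiguration.DockerReferenceIdentity.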
+func (ref dockerReference) PolicyConfigurationIdentity() string { + res, err := policyconfiguration.DockerReferenceIdentity(ref.ref) + if res == "" || err != nil { // Coverage: Should never happen, NewReference above should refuse values which could cause a failure. + panic(fmt.Sprintf("Internal inconsistency: policyconfiguration.DockerReferenceIdentity returned %#v, %v", res, err)) + } + return res +} + +// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search +// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed +// in order, terminating on first match, and an implicit "" is always checked at the end. +// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), +// and each following element to be a prefix of the element preceding it. +func (ref dockerReference) PolicyConfigurationNamespaces() []string { + return policyconfiguration.DockerReferenceNamespaces(ref.ref) +} + +// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. +// The caller must call .Close() on the returned ImageCloser. +// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, +// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. +// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. +func (ref dockerReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { + return newImage(ctx, sys, ref) +} + +// NewImageSource returns a types.ImageSource for this reference. +// The caller must call .Close() on the returned ImageSource. +func (ref dockerReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { + return newImageSource(ctx, sys, ref) +} + +// NewImageDestination returns a types.ImageDestination for this reference. +// The caller must call .Close() on the returned ImageDestination. +func (ref dockerReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { + return newImageDestination(sys, ref) +} + +// DeleteImage deletes the named image from the registry, if supported. +func (ref dockerReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { + return deleteImage(ctx, sys, ref) +} + +// tagOrDigest returns a tag or digest from the reference. +func (ref dockerReference) tagOrDigest() (string, error) { + if ref, ok := ref.ref.(reference.Canonical); ok { + return ref.Digest().String(), nil + } + if ref, ok := ref.ref.(reference.NamedTagged); ok { + return ref.Tag(), nil + } + // This should not happen, NewReference above refuses reference.IsNameOnly values. 
+ return "", errors.Errorf("Internal inconsistency: Reference %s unexpectedly has neither a digest nor a tag", reference.FamiliarString(ref.ref)) +} diff --git a/vendor/github.com/containers/image/docker/lookaside.go b/vendor/github.com/containers/image/docker/lookaside.go new file mode 100644 index 0000000..860f1ad --- /dev/null +++ b/vendor/github.com/containers/image/docker/lookaside.go @@ -0,0 +1,202 @@ +package docker + +import ( + "fmt" + "io/ioutil" + "net/url" + "os" + "path" + "path/filepath" + "strings" + + "github.com/containers/image/docker/reference" + "github.com/containers/image/types" + "github.com/ghodss/yaml" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// systemRegistriesDirPath is the path to registries.d, used for locating lookaside Docker signature storage. +// You can override this at build time with +// -ldflags '-X github.com/containers/image/docker.systemRegistriesDirPath=$your_path' +var systemRegistriesDirPath = builtinRegistriesDirPath + +// builtinRegistriesDirPath is the path to registries.d. +// DO NOT change this, instead see systemRegistriesDirPath above. +const builtinRegistriesDirPath = "/etc/containers/registries.d" + +// registryConfiguration is one of the files in registriesDirPath configuring lookaside locations, or the result of merging them all. +// NOTE: Keep this in sync with docs/registries.d.md! +type registryConfiguration struct { + DefaultDocker *registryNamespace `json:"default-docker"` + // The key is a namespace, using fully-expanded Docker reference format or parent namespaces (per dockerReference.PolicyConfiguration*), + Docker map[string]registryNamespace `json:"docker"` +} + +// registryNamespace defines lookaside locations for a single namespace. +type registryNamespace struct { + SigStore string `json:"sigstore"` // For reading, and if SigStoreStaging is not present, for writing. + SigStoreStaging string `json:"sigstore-staging"` // For writing only. +} + +// signatureStorageBase is an "opaque" type representing a lookaside Docker signature storage. +// Users outside of this file should use configuredSignatureStorageBase and signatureStorageURL below. +type signatureStorageBase *url.URL // The only documented value is nil, meaning storage is not supported. + +// configuredSignatureStorageBase reads configuration to find an appropriate signature storage URL for ref, for write access if “write”. +func configuredSignatureStorageBase(sys *types.SystemContext, ref dockerReference, write bool) (signatureStorageBase, error) { + // FIXME? Loading and parsing the config could be cached across calls. + dirPath := registriesDirPath(sys) + logrus.Debugf(`Using registries.d directory %s for sigstore configuration`, dirPath) + config, err := loadAndMergeConfig(dirPath) + if err != nil { + return nil, err + } + + topLevel := config.signatureTopLevel(ref, write) + if topLevel == "" { + return nil, nil + } + + url, err := url.Parse(topLevel) + if err != nil { + return nil, errors.Wrapf(err, "Invalid signature storage URL %s", topLevel) + } + // NOTE: Keep this in sync with docs/signature-protocols.md! + // FIXME? Restrict to explicitly supported schemes? + repo := reference.Path(ref.ref) // Note that this is without a tag or digest. 
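+ // For example (illustrative): with a configured base of
+ // https://sigstore.example.com and ref docker.io/library/busybox:latest,
+ // repo is "library/busybox", so the resulting base URL is
+ // https://sigstore.example.com/library/busybox; signatureStorageURL later
+ // appends "@sha256=<hex>/signature-<i>" for each individual signature.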
+ if path.Clean(repo) != repo { // Coverage: This should not be reachable because /./ and /../ components are not valid in docker references + return nil, errors.Errorf("Unexpected path elements in Docker reference %s for signature storage", ref.ref.String()) + } + url.Path = url.Path + "/" + repo + return url, nil +} + +// registriesDirPath returns a path to registries.d +func registriesDirPath(sys *types.SystemContext) string { + if sys != nil { + if sys.RegistriesDirPath != "" { + return sys.RegistriesDirPath + } + if sys.RootForImplicitAbsolutePaths != "" { + return filepath.Join(sys.RootForImplicitAbsolutePaths, systemRegistriesDirPath) + } + } + return systemRegistriesDirPath +} + +// loadAndMergeConfig loads configuration files in dirPath +func loadAndMergeConfig(dirPath string) (*registryConfiguration, error) { + mergedConfig := registryConfiguration{Docker: map[string]registryNamespace{}} + dockerDefaultMergedFrom := "" + nsMergedFrom := map[string]string{} + + dir, err := os.Open(dirPath) + if err != nil { + if os.IsNotExist(err) { + return &mergedConfig, nil + } + return nil, err + } + configNames, err := dir.Readdirnames(0) + if err != nil { + return nil, err + } + for _, configName := range configNames { + if !strings.HasSuffix(configName, ".yaml") { + continue + } + configPath := filepath.Join(dirPath, configName) + configBytes, err := ioutil.ReadFile(configPath) + if err != nil { + return nil, err + } + + var config registryConfiguration + err = yaml.Unmarshal(configBytes, &config) + if err != nil { + return nil, errors.Wrapf(err, "Error parsing %s", configPath) + } + + if config.DefaultDocker != nil { + if mergedConfig.DefaultDocker != nil { + return nil, errors.Errorf(`Error parsing signature storage configuration: "default-docker" defined both in "%s" and "%s"`, + dockerDefaultMergedFrom, configPath) + } + mergedConfig.DefaultDocker = config.DefaultDocker + dockerDefaultMergedFrom = configPath + } + + for nsName, nsConfig := range config.Docker { // includes config.Docker == nil + if _, ok := mergedConfig.Docker[nsName]; ok { + return nil, errors.Errorf(`Error parsing signature storage configuration: "docker" namespace "%s" defined both in "%s" and "%s"`, + nsName, nsMergedFrom[nsName], configPath) + } + mergedConfig.Docker[nsName] = nsConfig + nsMergedFrom[nsName] = configPath + } + } + + return &mergedConfig, nil +} + +// config.signatureTopLevel returns an URL string configured in config for ref, for write access if “write”. +// (the top level of the storage, namespaced by repo.FullName etc.), or "" if no signature storage should be used. +func (config *registryConfiguration) signatureTopLevel(ref dockerReference, write bool) string { + if config.Docker != nil { + // Look for a full match. + identity := ref.PolicyConfigurationIdentity() + if ns, ok := config.Docker[identity]; ok { + logrus.Debugf(` Using "docker" namespace %s`, identity) + if url := ns.signatureTopLevel(write); url != "" { + return url + } + } + + // Look for a match of the possible parent namespaces. 
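+ // For example (illustrative): for docker.io/library/busybox:latest this tries,
+ // in order, "docker.io/library/busybox", "docker.io/library" and "docker.io"
+ // before falling back to the "default-docker" entry below.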
+ for _, name := range ref.PolicyConfigurationNamespaces() {
+ if ns, ok := config.Docker[name]; ok {
+ logrus.Debugf(` Using "docker" namespace %s`, name)
+ if url := ns.signatureTopLevel(write); url != "" {
+ return url
+ }
+ }
+ }
+ }
+ // Look for a default location
+ if config.DefaultDocker != nil {
+ logrus.Debugf(` Using "default-docker" configuration`)
+ if url := config.DefaultDocker.signatureTopLevel(write); url != "" {
+ return url
+ }
+ }
+ logrus.Debugf(" No signature storage configuration found for %s", ref.PolicyConfigurationIdentity())
+ return ""
+}
+
+// ns.signatureTopLevel returns an URL string configured in ns for ref, for write access if “write”,
+// or "" if nothing has been configured.
+func (ns registryNamespace) signatureTopLevel(write bool) string {
+ if write && ns.SigStoreStaging != "" {
+ logrus.Debugf(` Using %s`, ns.SigStoreStaging)
+ return ns.SigStoreStaging
+ }
+ if ns.SigStore != "" {
+ logrus.Debugf(` Using %s`, ns.SigStore)
+ return ns.SigStore
+ }
+ return ""
+}
+
+// signatureStorageURL returns an URL usable for accessing the signature index in base with known manifestDigest, or nil if not applicable.
+// Returns nil iff base == nil.
+// NOTE: Keep this in sync with docs/signature-protocols.md!
+func signatureStorageURL(base signatureStorageBase, manifestDigest digest.Digest, index int) *url.URL {
+ if base == nil {
+ return nil
+ }
+ url := *base
+ url.Path = fmt.Sprintf("%s@%s=%s/signature-%d", url.Path, manifestDigest.Algorithm(), manifestDigest.Hex(), index+1)
+ return &url
+}
diff --git a/vendor/github.com/containers/image/docker/policyconfiguration/naming.go b/vendor/github.com/containers/image/docker/policyconfiguration/naming.go
new file mode 100644
index 0000000..31bbb54
--- /dev/null
+++ b/vendor/github.com/containers/image/docker/policyconfiguration/naming.go
@@ -0,0 +1,56 @@
+package policyconfiguration
+
+import (
+ "strings"
+
+ "github.com/containers/image/docker/reference"
+ "github.com/pkg/errors"
+)
+
+// DockerReferenceIdentity returns a string representation of the reference, suitable for policy lookup,
+// as a backend for ImageReference.PolicyConfigurationIdentity.
+// The reference must satisfy !reference.IsNameOnly().
+func DockerReferenceIdentity(ref reference.Named) (string, error) {
+ res := ref.Name()
+ tagged, isTagged := ref.(reference.NamedTagged)
+ digested, isDigested := ref.(reference.Canonical)
+ switch {
+ case isTagged && isDigested: // Note that this CAN actually happen.
+ return "", errors.Errorf("Unexpected Docker reference %s with both a tag and a digest", reference.FamiliarString(ref))
+ case !isTagged && !isDigested: // This should not happen, the caller is expected to ensure !reference.IsNameOnly()
+ return "", errors.Errorf("Internal inconsistency: Docker reference %s with neither a tag nor a digest", reference.FamiliarString(ref))
+ case isTagged:
+ res = res + ":" + tagged.Tag()
+ case isDigested:
+ res = res + "@" + digested.Digest().String()
+ default: // Coverage: The above was supposed to be exhaustive.
+ return "", errors.New("Internal inconsistency, unexpected default branch")
+ }
+ return res, nil
+}
+
+// DockerReferenceNamespaces returns a list of other policy configuration namespaces to search,
+// as a backend for ImageReference.PolicyConfigurationNamespaces.
+// The reference must satisfy !reference.IsNameOnly().
+func DockerReferenceNamespaces(ref reference.Named) []string {
+ // Look for a match of the repository, and then of the possible parent
+ // namespaces. 
Note that this only happens on the expanded host names + // and repository names, i.e. "busybox" is looked up as "docker.io/library/busybox", + // then in its parent "docker.io/library"; in none of "busybox", + // un-namespaced "library" nor in "" supposedly implicitly representing "library/". + // + // ref.FullName() == ref.Hostname() + "/" + ref.RemoteName(), so the last + // iteration matches the host name (for any namespace). + res := []string{} + name := ref.Name() + for { + res = append(res, name) + + lastSlash := strings.LastIndex(name, "/") + if lastSlash == -1 { + break + } + name = name[:lastSlash] + } + return res +} diff --git a/vendor/github.com/containers/image/docker/reference/README.md b/vendor/github.com/containers/image/docker/reference/README.md new file mode 100644 index 0000000..3c4d74e --- /dev/null +++ b/vendor/github.com/containers/image/docker/reference/README.md @@ -0,0 +1,2 @@ +This is a copy of github.com/docker/distribution/reference as of commit 3226863cbcba6dbc2f6c83a37b28126c934af3f8, +except that ParseAnyReferenceWithSet has been removed to drop the dependency on github.com/docker/distribution/digestset. \ No newline at end of file diff --git a/vendor/github.com/containers/image/docker/reference/helpers.go b/vendor/github.com/containers/image/docker/reference/helpers.go new file mode 100644 index 0000000..978df7e --- /dev/null +++ b/vendor/github.com/containers/image/docker/reference/helpers.go @@ -0,0 +1,42 @@ +package reference + +import "path" + +// IsNameOnly returns true if reference only contains a repo name. +func IsNameOnly(ref Named) bool { + if _, ok := ref.(NamedTagged); ok { + return false + } + if _, ok := ref.(Canonical); ok { + return false + } + return true +} + +// FamiliarName returns the familiar name string +// for the given named, familiarizing if needed. +func FamiliarName(ref Named) string { + if nn, ok := ref.(normalizedNamed); ok { + return nn.Familiar().Name() + } + return ref.Name() +} + +// FamiliarString returns the familiar string representation +// for the given reference, familiarizing if needed. +func FamiliarString(ref Reference) string { + if nn, ok := ref.(normalizedNamed); ok { + return nn.Familiar().String() + } + return ref.String() +} + +// FamiliarMatch reports whether ref matches the specified pattern. +// See https://godoc.org/path#Match for supported patterns. +func FamiliarMatch(pattern string, ref Reference) (bool, error) { + matched, err := path.Match(pattern, FamiliarString(ref)) + if namedRef, isNamed := ref.(Named); isNamed && !matched { + matched, _ = path.Match(pattern, FamiliarName(namedRef)) + } + return matched, err +} diff --git a/vendor/github.com/containers/image/docker/reference/normalize.go b/vendor/github.com/containers/image/docker/reference/normalize.go new file mode 100644 index 0000000..6a86ec6 --- /dev/null +++ b/vendor/github.com/containers/image/docker/reference/normalize.go @@ -0,0 +1,181 @@ +package reference + +import ( + "errors" + "fmt" + "strings" + + "github.com/opencontainers/go-digest" +) + +var ( + legacyDefaultDomain = "index.docker.io" + defaultDomain = "docker.io" + officialRepoName = "library" + defaultTag = "latest" +) + +// normalizedNamed represents a name which has been +// normalized and has a familiar form. A familiar name +// is what is used in Docker UI. An example normalized +// name is "docker.io/library/ubuntu" and corresponding +// familiar name of "ubuntu". 
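+// For example (illustrative):
+//
+//   ParseNormalizedNamed("ubuntu")         -> "docker.io/library/ubuntu"
+//   ParseNormalizedNamed("user/app")       -> "docker.io/user/app"
+//   ParseNormalizedNamed("quay.io/ns/app") -> "quay.io/ns/app" (already qualified)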
+type normalizedNamed interface {
+ Named
+ Familiar() Named
+}
+
+// ParseNormalizedNamed parses a string into a named reference
+// transforming a familiar name from Docker UI to a fully
+// qualified reference. If the value may be an identifier,
+// use ParseAnyReference.
+func ParseNormalizedNamed(s string) (Named, error) {
+ if ok := anchoredIdentifierRegexp.MatchString(s); ok {
+ return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s)
+ }
+ domain, remainder := splitDockerDomain(s)
+ var remoteName string
+ if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 {
+ remoteName = remainder[:tagSep]
+ } else {
+ remoteName = remainder
+ }
+ if strings.ToLower(remoteName) != remoteName {
+ return nil, errors.New("invalid reference format: repository name must be lowercase")
+ }
+
+ ref, err := Parse(domain + "/" + remainder)
+ if err != nil {
+ return nil, err
+ }
+ named, isNamed := ref.(Named)
+ if !isNamed {
+ return nil, fmt.Errorf("reference %s has no name", ref.String())
+ }
+ return named, nil
+}
+
+// ParseDockerRef normalizes the image reference following the docker convention. This is added
+// mainly for backward compatibility.
+// The returned reference is always either tagged or digested. If the reference contains both a tag
+// and a digest, the digested form is returned, e.g. docker.io/library/busybox:latest@
+// sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa will be returned as
+// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa.
+func ParseDockerRef(ref string) (Named, error) {
+ named, err := ParseNormalizedNamed(ref)
+ if err != nil {
+ return nil, err
+ }
+ if _, ok := named.(NamedTagged); ok {
+ if canonical, ok := named.(Canonical); ok {
+ // The reference is both tagged and digested, only
+ // return digested.
+ newNamed, err := WithName(canonical.Name())
+ if err != nil {
+ return nil, err
+ }
+ newCanonical, err := WithDigest(newNamed, canonical.Digest())
+ if err != nil {
+ return nil, err
+ }
+ return newCanonical, nil
+ }
+ }
+ return TagNameOnly(named), nil
+}
+
+// splitDockerDomain splits a repository name to domain and remotename string.
+// If no valid domain is found, the default domain is used. Repository name
+// needs to be already validated before.
+func splitDockerDomain(name string) (domain, remainder string) {
+ i := strings.IndexRune(name, '/')
+ if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") {
+ domain, remainder = defaultDomain, name
+ } else {
+ domain, remainder = name[:i], name[i+1:]
+ }
+ if domain == legacyDefaultDomain {
+ domain = defaultDomain
+ }
+ if domain == defaultDomain && !strings.ContainsRune(remainder, '/') {
+ remainder = officialRepoName + "/" + remainder
+ }
+ return
+}
+
+// familiarizeName returns a shortened version of the name familiar
+// to the Docker UI. Familiar names have the default domain
+// "docker.io" and "library/" repository prefix removed.
+// For example, "docker.io/library/redis" will have the familiar
+// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp".
+// Returns a familiarized named only reference. 
+func familiarizeName(named namedRepository) repository { + repo := repository{ + domain: named.Domain(), + path: named.Path(), + } + + if repo.domain == defaultDomain { + repo.domain = "" + // Handle official repositories which have the pattern "library/" + if split := strings.Split(repo.path, "/"); len(split) == 2 && split[0] == officialRepoName { + repo.path = split[1] + } + } + return repo +} + +func (r reference) Familiar() Named { + return reference{ + namedRepository: familiarizeName(r.namedRepository), + tag: r.tag, + digest: r.digest, + } +} + +func (r repository) Familiar() Named { + return familiarizeName(r) +} + +func (t taggedReference) Familiar() Named { + return taggedReference{ + namedRepository: familiarizeName(t.namedRepository), + tag: t.tag, + } +} + +func (c canonicalReference) Familiar() Named { + return canonicalReference{ + namedRepository: familiarizeName(c.namedRepository), + digest: c.digest, + } +} + +// TagNameOnly adds the default tag "latest" to a reference if it only has +// a repo name. +func TagNameOnly(ref Named) Named { + if IsNameOnly(ref) { + namedTagged, err := WithTag(ref, defaultTag) + if err != nil { + // Default tag must be valid, to create a NamedTagged + // type with non-validated input the WithTag function + // should be used instead + panic(err) + } + return namedTagged + } + return ref +} + +// ParseAnyReference parses a reference string as a possible identifier, +// full digest, or familiar name. +func ParseAnyReference(ref string) (Reference, error) { + if ok := anchoredIdentifierRegexp.MatchString(ref); ok { + return digestReference("sha256:" + ref), nil + } + if dgst, err := digest.Parse(ref); err == nil { + return digestReference(dgst), nil + } + + return ParseNormalizedNamed(ref) +} diff --git a/vendor/github.com/containers/image/docker/reference/reference.go b/vendor/github.com/containers/image/docker/reference/reference.go new file mode 100644 index 0000000..8c0c23b --- /dev/null +++ b/vendor/github.com/containers/image/docker/reference/reference.go @@ -0,0 +1,433 @@ +// Package reference provides a general type to represent any way of referencing images within the registry. +// Its main purpose is to abstract tags and digests (content-addressable hash). +// +// Grammar +// +// reference := name [ ":" tag ] [ "@" digest ] +// name := [domain '/'] path-component ['/' path-component]* +// domain := domain-component ['.' domain-component]* [':' port-number] +// domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/ +// port-number := /[0-9]+/ +// path-component := alpha-numeric [separator alpha-numeric]* +// alpha-numeric := /[a-z0-9]+/ +// separator := /[_.]|__|[-]*/ +// +// tag := /[\w][\w.-]{0,127}/ +// +// digest := digest-algorithm ":" digest-hex +// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]* +// digest-algorithm-separator := /[+.-_]/ +// digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/ +// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value +// +// identifier := /[a-f0-9]{64}/ +// short-identifier := /[a-f0-9]{6,64}/ +package reference + +import ( + "errors" + "fmt" + "strings" + + "github.com/opencontainers/go-digest" +) + +const ( + // NameTotalLengthMax is the maximum total number of characters in a repository name. + NameTotalLengthMax = 255 +) + +var ( + // ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference. 
+ ErrReferenceInvalidFormat = errors.New("invalid reference format")
+
+ // ErrTagInvalidFormat represents an error while trying to parse a string as a tag.
+ ErrTagInvalidFormat = errors.New("invalid tag format")
+
+ // ErrDigestInvalidFormat represents an error while trying to parse a string as a digest.
+ ErrDigestInvalidFormat = errors.New("invalid digest format")
+
+ // ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters.
+ ErrNameContainsUppercase = errors.New("repository name must be lowercase")
+
+ // ErrNameEmpty is returned for empty, invalid repository names.
+ ErrNameEmpty = errors.New("repository name must have at least one component")
+
+ // ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax.
+ ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax)
+
+ // ErrNameNotCanonical is returned when a name is not canonical.
+ ErrNameNotCanonical = errors.New("repository name must be canonical")
+)
+
+// Reference is an opaque object reference identifier that may include
+// modifiers such as a hostname, name, tag, and digest.
+type Reference interface {
+ // String returns the full reference
+ String() string
+}
+
+// Field provides a wrapper type for resolving correct reference types when
+// working with encoding.
+type Field struct {
+ reference Reference
+}
+
+// AsField wraps a reference in a Field for encoding.
+func AsField(reference Reference) Field {
+ return Field{reference}
+}
+
+// Reference unwraps the reference type from the field to
+// return the Reference object. This object should be
+// of the appropriate type to further check for different
+// reference types.
+func (f Field) Reference() Reference {
+ return f.reference
+}
+
+// MarshalText serializes the field to byte text which
+// is the string of the reference.
+func (f Field) MarshalText() (p []byte, err error) {
+ return []byte(f.reference.String()), nil
+}
+
+// UnmarshalText parses text bytes by invoking the
+// reference parser to ensure the appropriately
+// typed reference object is wrapped by field.
+func (f *Field) UnmarshalText(p []byte) error {
+ r, err := Parse(string(p))
+ if err != nil {
+ return err
+ }
+
+ f.reference = r
+ return nil
+}
+
+// Named is an object with a full name
+type Named interface {
+ Reference
+ Name() string
+}
+
+// Tagged is an object which has a tag
+type Tagged interface {
+ Reference
+ Tag() string
+}
+
+// NamedTagged is an object including a name and tag.
+type NamedTagged interface {
+ Named
+ Tag() string
+}
+
+// Digested is an object which has a digest
+// by which it can be referenced
+type Digested interface {
+ Reference
+ Digest() digest.Digest
+}
+
+// Canonical reference is an object with a fully unique
+// name including a name with domain and digest
+type Canonical interface {
+ Named
+ Digest() digest.Digest
+}
+
+// namedRepository is a reference to a repository with a name.
+// A namedRepository has both domain and path components. 
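+// For example (illustrative): for "docker.io/library/ubuntu", Domain() returns
+// "docker.io" and Path() returns "library/ubuntu".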
+type namedRepository interface { + Named + Domain() string + Path() string +} + +// Domain returns the domain part of the Named reference +func Domain(named Named) string { + if r, ok := named.(namedRepository); ok { + return r.Domain() + } + domain, _ := splitDomain(named.Name()) + return domain +} + +// Path returns the name without the domain part of the Named reference +func Path(named Named) (name string) { + if r, ok := named.(namedRepository); ok { + return r.Path() + } + _, path := splitDomain(named.Name()) + return path +} + +func splitDomain(name string) (string, string) { + match := anchoredNameRegexp.FindStringSubmatch(name) + if len(match) != 3 { + return "", name + } + return match[1], match[2] +} + +// SplitHostname splits a named reference into a +// hostname and name string. If no valid hostname is +// found, the hostname is empty and the full value +// is returned as name +// DEPRECATED: Use Domain or Path +func SplitHostname(named Named) (string, string) { + if r, ok := named.(namedRepository); ok { + return r.Domain(), r.Path() + } + return splitDomain(named.Name()) +} + +// Parse parses s and returns a syntactically valid Reference. +// If an error was encountered it is returned, along with a nil Reference. +// NOTE: Parse will not handle short digests. +func Parse(s string) (Reference, error) { + matches := ReferenceRegexp.FindStringSubmatch(s) + if matches == nil { + if s == "" { + return nil, ErrNameEmpty + } + if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil { + return nil, ErrNameContainsUppercase + } + return nil, ErrReferenceInvalidFormat + } + + if len(matches[1]) > NameTotalLengthMax { + return nil, ErrNameTooLong + } + + var repo repository + + nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1]) + if len(nameMatch) == 3 { + repo.domain = nameMatch[1] + repo.path = nameMatch[2] + } else { + repo.domain = "" + repo.path = matches[1] + } + + ref := reference{ + namedRepository: repo, + tag: matches[2], + } + if matches[3] != "" { + var err error + ref.digest, err = digest.Parse(matches[3]) + if err != nil { + return nil, err + } + } + + r := getBestReferenceType(ref) + if r == nil { + return nil, ErrNameEmpty + } + + return r, nil +} + +// ParseNamed parses s and returns a syntactically valid reference implementing +// the Named interface. The reference must have a name and be in the canonical +// form, otherwise an error is returned. +// If an error was encountered it is returned, along with a nil Reference. +// NOTE: ParseNamed will not handle short digests. +func ParseNamed(s string) (Named, error) { + named, err := ParseNormalizedNamed(s) + if err != nil { + return nil, err + } + if named.String() != s { + return nil, ErrNameNotCanonical + } + return named, nil +} + +// WithName returns a named object representing the given string. If the input +// is invalid ErrReferenceInvalidFormat will be returned. +func WithName(name string) (Named, error) { + if len(name) > NameTotalLengthMax { + return nil, ErrNameTooLong + } + + match := anchoredNameRegexp.FindStringSubmatch(name) + if match == nil || len(match) != 3 { + return nil, ErrReferenceInvalidFormat + } + return repository{ + domain: match[1], + path: match[2], + }, nil +} + +// WithTag combines the name from "name" and the tag from "tag" to form a +// reference incorporating both the name and the tag. 
+func WithTag(name Named, tag string) (NamedTagged, error) { + if !anchoredTagRegexp.MatchString(tag) { + return nil, ErrTagInvalidFormat + } + var repo repository + if r, ok := name.(namedRepository); ok { + repo.domain = r.Domain() + repo.path = r.Path() + } else { + repo.path = name.Name() + } + if canonical, ok := name.(Canonical); ok { + return reference{ + namedRepository: repo, + tag: tag, + digest: canonical.Digest(), + }, nil + } + return taggedReference{ + namedRepository: repo, + tag: tag, + }, nil +} + +// WithDigest combines the name from "name" and the digest from "digest" to form +// a reference incorporating both the name and the digest. +func WithDigest(name Named, digest digest.Digest) (Canonical, error) { + if !anchoredDigestRegexp.MatchString(digest.String()) { + return nil, ErrDigestInvalidFormat + } + var repo repository + if r, ok := name.(namedRepository); ok { + repo.domain = r.Domain() + repo.path = r.Path() + } else { + repo.path = name.Name() + } + if tagged, ok := name.(Tagged); ok { + return reference{ + namedRepository: repo, + tag: tagged.Tag(), + digest: digest, + }, nil + } + return canonicalReference{ + namedRepository: repo, + digest: digest, + }, nil +} + +// TrimNamed removes any tag or digest from the named reference. +func TrimNamed(ref Named) Named { + domain, path := SplitHostname(ref) + return repository{ + domain: domain, + path: path, + } +} + +func getBestReferenceType(ref reference) Reference { + if ref.Name() == "" { + // Allow digest only references + if ref.digest != "" { + return digestReference(ref.digest) + } + return nil + } + if ref.tag == "" { + if ref.digest != "" { + return canonicalReference{ + namedRepository: ref.namedRepository, + digest: ref.digest, + } + } + return ref.namedRepository + } + if ref.digest == "" { + return taggedReference{ + namedRepository: ref.namedRepository, + tag: ref.tag, + } + } + + return ref +} + +type reference struct { + namedRepository + tag string + digest digest.Digest +} + +func (r reference) String() string { + return r.Name() + ":" + r.tag + "@" + r.digest.String() +} + +func (r reference) Tag() string { + return r.tag +} + +func (r reference) Digest() digest.Digest { + return r.digest +} + +type repository struct { + domain string + path string +} + +func (r repository) String() string { + return r.Name() +} + +func (r repository) Name() string { + if r.domain == "" { + return r.path + } + return r.domain + "/" + r.path +} + +func (r repository) Domain() string { + return r.domain +} + +func (r repository) Path() string { + return r.path +} + +type digestReference digest.Digest + +func (d digestReference) String() string { + return digest.Digest(d).String() +} + +func (d digestReference) Digest() digest.Digest { + return digest.Digest(d) +} + +type taggedReference struct { + namedRepository + tag string +} + +func (t taggedReference) String() string { + return t.Name() + ":" + t.tag +} + +func (t taggedReference) Tag() string { + return t.tag +} + +type canonicalReference struct { + namedRepository + digest digest.Digest +} + +func (c canonicalReference) String() string { + return c.Name() + "@" + c.digest.String() +} + +func (c canonicalReference) Digest() digest.Digest { + return c.digest +} diff --git a/vendor/github.com/containers/image/docker/reference/regexp.go b/vendor/github.com/containers/image/docker/reference/regexp.go new file mode 100644 index 0000000..7860349 --- /dev/null +++ b/vendor/github.com/containers/image/docker/reference/regexp.go @@ -0,0 +1,143 @@ +package reference + 
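+// The exported patterns below are built by composing small *regexp.Regexp
+// values; each helper concatenates the source strings of its arguments, so,
+// for example, anchored(capture(DomainRegexp)) compiles to the pattern
+// `^(` + DomainRegexp.String() + `)$`.
+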
+import "regexp" + +var ( + // alphaNumericRegexp defines the alpha numeric atom, typically a + // component of names. This only allows lower case characters and digits. + alphaNumericRegexp = match(`[a-z0-9]+`) + + // separatorRegexp defines the separators allowed to be embedded in name + // components. This allow one period, one or two underscore and multiple + // dashes. + separatorRegexp = match(`(?:[._]|__|[-]*)`) + + // nameComponentRegexp restricts registry path component names to start + // with at least one letter or number, with following parts able to be + // separated by one period, one or two underscore and multiple dashes. + nameComponentRegexp = expression( + alphaNumericRegexp, + optional(repeated(separatorRegexp, alphaNumericRegexp))) + + // domainComponentRegexp restricts the registry domain component of a + // repository name to start with a component as defined by DomainRegexp + // and followed by an optional port. + domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`) + + // DomainRegexp defines the structure of potential domain components + // that may be part of image names. This is purposely a subset of what is + // allowed by DNS to ensure backwards compatibility with Docker image + // names. + DomainRegexp = expression( + domainComponentRegexp, + optional(repeated(literal(`.`), domainComponentRegexp)), + optional(literal(`:`), match(`[0-9]+`))) + + // TagRegexp matches valid tag names. From docker/docker:graph/tags.go. + TagRegexp = match(`[\w][\w.-]{0,127}`) + + // anchoredTagRegexp matches valid tag names, anchored at the start and + // end of the matched string. + anchoredTagRegexp = anchored(TagRegexp) + + // DigestRegexp matches valid digests. + DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`) + + // anchoredDigestRegexp matches valid digests, anchored at the start and + // end of the matched string. + anchoredDigestRegexp = anchored(DigestRegexp) + + // NameRegexp is the format for the name component of references. The + // regexp has capturing groups for the domain and name part omitting + // the separating forward slash from either. + NameRegexp = expression( + optional(DomainRegexp, literal(`/`)), + nameComponentRegexp, + optional(repeated(literal(`/`), nameComponentRegexp))) + + // anchoredNameRegexp is used to parse a name value, capturing the + // domain and trailing components. + anchoredNameRegexp = anchored( + optional(capture(DomainRegexp), literal(`/`)), + capture(nameComponentRegexp, + optional(repeated(literal(`/`), nameComponentRegexp)))) + + // ReferenceRegexp is the full supported format of a reference. The regexp + // is anchored and has capturing groups for name, tag, and digest + // components. + ReferenceRegexp = anchored(capture(NameRegexp), + optional(literal(":"), capture(TagRegexp)), + optional(literal("@"), capture(DigestRegexp))) + + // IdentifierRegexp is the format for string identifier used as a + // content addressable identifier using sha256. These identifiers + // are like digests without the algorithm, since sha256 is used. + IdentifierRegexp = match(`([a-f0-9]{64})`) + + // ShortIdentifierRegexp is the format used to represent a prefix + // of an identifier. A prefix may be used to match a sha256 identifier + // within a list of trusted identifiers. + ShortIdentifierRegexp = match(`([a-f0-9]{6,64})`) + + // anchoredIdentifierRegexp is used to check or match an + // identifier value, anchored at start and end of string. 
+ anchoredIdentifierRegexp = anchored(IdentifierRegexp) + + // anchoredShortIdentifierRegexp is used to check if a value + // is a possible identifier prefix, anchored at start and end + // of string. + anchoredShortIdentifierRegexp = anchored(ShortIdentifierRegexp) +) + +// match compiles the string to a regular expression. +var match = regexp.MustCompile + +// literal compiles s into a literal regular expression, escaping any regexp +// reserved characters. +func literal(s string) *regexp.Regexp { + re := match(regexp.QuoteMeta(s)) + + if _, complete := re.LiteralPrefix(); !complete { + panic("must be a literal") + } + + return re +} + +// expression defines a full expression, where each regular expression must +// follow the previous. +func expression(res ...*regexp.Regexp) *regexp.Regexp { + var s string + for _, re := range res { + s += re.String() + } + + return match(s) +} + +// optional wraps the expression in a non-capturing group and makes the +// production optional. +func optional(res ...*regexp.Regexp) *regexp.Regexp { + return match(group(expression(res...)).String() + `?`) +} + +// repeated wraps the regexp in a non-capturing group to get one or more +// matches. +func repeated(res ...*regexp.Regexp) *regexp.Regexp { + return match(group(expression(res...)).String() + `+`) +} + +// group wraps the regexp in a non-capturing group. +func group(res ...*regexp.Regexp) *regexp.Regexp { + return match(`(?:` + expression(res...).String() + `)`) +} + +// capture wraps the expression in a capturing group. +func capture(res ...*regexp.Regexp) *regexp.Regexp { + return match(`(` + expression(res...).String() + `)`) +} + +// anchored anchors the regular expression by adding start and end delimiters. +func anchored(res ...*regexp.Regexp) *regexp.Regexp { + return match(`^` + expression(res...).String() + `$`) +} diff --git a/vendor/github.com/containers/image/docker/tarfile/dest.go b/vendor/github.com/containers/image/docker/tarfile/dest.go new file mode 100644 index 0000000..5f30edd --- /dev/null +++ b/vendor/github.com/containers/image/docker/tarfile/dest.go @@ -0,0 +1,407 @@ +package tarfile + +import ( + "archive/tar" + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "time" + + "github.com/containers/image/docker/reference" + "github.com/containers/image/internal/tmpdir" + "github.com/containers/image/manifest" + "github.com/containers/image/types" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// Destination is a partial implementation of types.ImageDestination for writing to an io.Writer. +type Destination struct { + writer io.Writer + tar *tar.Writer + repoTags []reference.NamedTagged + // Other state. + blobs map[digest.Digest]types.BlobInfo // list of already-sent blobs + config []byte +} + +// NewDestination returns a tarfile.Destination for the specified io.Writer. +func NewDestination(dest io.Writer, ref reference.NamedTagged) *Destination { + repoTags := []reference.NamedTagged{} + if ref != nil { + repoTags = append(repoTags, ref) + } + return &Destination{ + writer: dest, + tar: tar.NewWriter(dest), + repoTags: repoTags, + blobs: make(map[digest.Digest]types.BlobInfo), + } +} + +// AddRepoTags adds the specified tags to the destination's repoTags. +func (d *Destination) AddRepoTags(tags []reference.NamedTagged) { + d.repoTags = append(d.repoTags, tags...) 
+}
+
+// SupportedManifestMIMETypes tells which manifest mime types the destination supports.
+// If an empty slice or nil is returned, then any mime type can be tried to upload.
+func (d *Destination) SupportedManifestMIMETypes() []string {
+	return []string{
+		manifest.DockerV2Schema2MediaType, // We rely on the types.Image.UpdatedImage schema conversion capabilities.
+	}
+}
+
+// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
+// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
+func (d *Destination) SupportsSignatures(ctx context.Context) error {
+	return errors.Errorf("Storing signatures for docker tar files is not supported")
+}
+
+// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
+// uploaded to the image destination, true otherwise.
+func (d *Destination) AcceptsForeignLayerURLs() bool {
+	return false
+}
+
+// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise.
+func (d *Destination) MustMatchRuntimeOS() bool {
+	return false
+}
+
+// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
+// and would prefer to receive an unmodified manifest instead of one modified for the destination.
+// Does not make a difference if Reference().DockerReference() is nil.
+func (d *Destination) IgnoresEmbeddedDockerReference() bool {
+	return false // N/A, we only accept schema2 images where EmbeddedDockerReferenceConflicts() is always false.
+}
+
+// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
+func (d *Destination) HasThreadSafePutBlob() bool {
+	return false
+}
+
+// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
+// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
+// inputInfo.Size is the expected length of stream, if known.
+// May update cache.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
+	// Ouch, we need to stream the blob into a temporary file just to determine the size.
+	// When the layer is decompressed, we also have to generate the digest on uncompressed data.
+	if inputInfo.Size == -1 || inputInfo.Digest.String() == "" {
+		logrus.Debugf("docker tarfile: input with unknown size, streaming to disk first ...")
+		streamCopy, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(), "docker-tarfile-blob")
+		if err != nil {
+			return types.BlobInfo{}, err
+		}
+		defer os.Remove(streamCopy.Name())
+		defer streamCopy.Close()
+
+		digester := digest.Canonical.Digester()
+		tee := io.TeeReader(stream, digester.Hash())
+		// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
+		size, err := io.Copy(streamCopy, tee)
+		if err != nil {
+			return types.BlobInfo{}, err
+		}
+		_, err = streamCopy.Seek(0, os.SEEK_SET)
+		if err != nil {
+			return types.BlobInfo{}, err
+		}
+		inputInfo.Size = size // inputInfo is a struct, so we are only modifying our copy.
+		if inputInfo.Digest == "" {
+			inputInfo.Digest = digester.Digest()
+		}
+		stream = streamCopy
+		logrus.Debugf("... streaming done")
+	}
+
+	// Maybe the blob has already been sent
+	ok, reusedInfo, err := d.TryReusingBlob(ctx, inputInfo, cache, false)
+	if err != nil {
+		return types.BlobInfo{}, err
+	}
+	if ok {
+		return reusedInfo, nil
+	}
+
+	if isConfig {
+		buf, err := ioutil.ReadAll(stream)
+		if err != nil {
+			return types.BlobInfo{}, errors.Wrap(err, "Error reading Config file stream")
+		}
+		d.config = buf
+		if err := d.sendFile(inputInfo.Digest.Hex()+".json", inputInfo.Size, bytes.NewReader(buf)); err != nil {
+			return types.BlobInfo{}, errors.Wrap(err, "Error writing Config file")
+		}
+	} else {
+		// Note that this can't be e.g. filepath.Join(l.Digest.Hex(), legacyLayerFileName); due to the way
+		// writeLegacyLayerMetadata constructs layer IDs differently from inputinfo.Digest values (as described
+		// inside it), most of the layers would end up in subdirectories alone without any metadata; (docker load)
+		// tries to load every subdirectory as an image and fails if the config is missing. So, keep the layers
+		// in the root of the tarball.
+		if err := d.sendFile(inputInfo.Digest.Hex()+".tar", inputInfo.Size, stream); err != nil {
+			return types.BlobInfo{}, err
+		}
+	}
+	d.blobs[inputInfo.Digest] = types.BlobInfo{Digest: inputInfo.Digest, Size: inputInfo.Size}
+	return types.BlobInfo{Digest: inputInfo.Digest, Size: inputInfo.Size}, nil
+}
+
+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
+func (d *Destination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
+	if info.Digest == "" {
+		return false, types.BlobInfo{}, errors.Errorf("Can not check for a blob with unknown digest")
+	}
+	if blob, ok := d.blobs[info.Digest]; ok {
+		return true, types.BlobInfo{Digest: info.Digest, Size: blob.Size}, nil
+	}
+	return false, types.BlobInfo{}, nil
+}
+
+func (d *Destination) createRepositoriesFile(rootLayerID string) error {
+	repositories := map[string]map[string]string{}
+	for _, repoTag := range d.repoTags {
+		if val, ok := repositories[repoTag.Name()]; ok {
+			val[repoTag.Tag()] = rootLayerID
+		} else {
+			repositories[repoTag.Name()] = map[string]string{repoTag.Tag(): rootLayerID}
+		}
+	}
+
+	b, err := json.Marshal(repositories)
+	if err != nil {
+		return errors.Wrap(err, "Error marshaling repositories")
+	}
+	if err := d.sendBytes(legacyRepositoriesFileName, b); err != nil {
+		return errors.Wrap(err, "Error writing config json file")
+	}
+	return nil
+}
+
+// PutManifest writes manifest to the destination.
+// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
+// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
+// but may accept a different manifest type, the returned error must be a ManifestTypeRejectedError.
+func (d *Destination) PutManifest(ctx context.Context, m []byte) error {
+	// We do not bother with types.ManifestTypeRejectedError; our .SupportedManifestMIMETypes() above is already providing only one alternative,
+	// so the caller trying a different manifest kind would be pointless.
+	var man manifest.Schema2
+	if err := json.Unmarshal(m, &man); err != nil {
+		return errors.Wrap(err, "Error parsing manifest")
+	}
+	if man.SchemaVersion != 2 || man.MediaType != manifest.DockerV2Schema2MediaType {
+		return errors.Errorf("Unsupported manifest type, need a Docker schema 2 manifest")
+	}
+
+	layerPaths, lastLayerID, err := d.writeLegacyLayerMetadata(man.LayersDescriptors)
+	if err != nil {
+		return err
+	}
+
+	if len(man.LayersDescriptors) > 0 {
+		if err := d.createRepositoriesFile(lastLayerID); err != nil {
+			return err
+		}
+	}
+
+	repoTags := []string{}
+	for _, tag := range d.repoTags {
+		// For github.com/docker/docker consumers, this works just as well as
+		//   refString := ref.String()
+		// because when reading the RepoTags strings, github.com/docker/docker/reference
+		// normalizes both of them to the same value.
+		//
+		// Doing it this way to include the normalized-out `docker.io[/library]` does make
+		// a difference for github.com/projectatomic/docker consumers, with the
+		// “Add --add-registry and --block-registry options to docker daemon” patch.
+		// These consumers treat reference strings which include a hostname and reference
+		// strings without a hostname differently.
+		//
+		// Using the host name here is more explicit about the intent, and it has the same
+		// effect as (docker pull) in projectatomic/docker, which tags the result using
+		// a hostname-qualified reference.
+		// See https://github.com/containers/image/issues/72 for a more detailed
+		// analysis and explanation.
+ refString := fmt.Sprintf("%s:%s", tag.Name(), tag.Tag()) + repoTags = append(repoTags, refString) + } + + items := []ManifestItem{{ + Config: man.ConfigDescriptor.Digest.Hex() + ".json", + RepoTags: repoTags, + Layers: layerPaths, + Parent: "", + LayerSources: nil, + }} + itemsBytes, err := json.Marshal(&items) + if err != nil { + return err + } + + // FIXME? Do we also need to support the legacy format? + return d.sendBytes(manifestFileName, itemsBytes) +} + +// writeLegacyLayerMetadata writes legacy VERSION and configuration files for all layers +func (d *Destination) writeLegacyLayerMetadata(layerDescriptors []manifest.Schema2Descriptor) (layerPaths []string, lastLayerID string, err error) { + var chainID digest.Digest + lastLayerID = "" + for i, l := range layerDescriptors { + // This chainID value matches the computation in docker/docker/layer.CreateChainID … + if chainID == "" { + chainID = l.Digest + } else { + chainID = digest.Canonical.FromString(chainID.String() + " " + l.Digest.String()) + } + // … but note that this image ID does not match docker/docker/image/v1.CreateID. At least recent + // versions allocate new IDs on load, as long as the IDs we use are unique / cannot loop. + // + // Overall, the goal of computing a digest dependent on the full history is to avoid reusing an image ID + // (and possibly creating a loop in the "parent" links) if a layer with the same DiffID appears two or more + // times in layersDescriptors. The ChainID values are sufficient for this, the v1.CreateID computation + // which also mixes in the full image configuration seems unnecessary, at least as long as we are storing + // only a single image per tarball, i.e. all DiffID prefixes are unique (can’t differ only with + // configuration). + layerID := chainID.Hex() + + physicalLayerPath := l.Digest.Hex() + ".tar" + // The layer itself has been stored into physicalLayerPath in PutManifest. + // So, use that path for layerPaths used in the non-legacy manifest + layerPaths = append(layerPaths, physicalLayerPath) + // ... 
and create a symlink for the legacy format;
+		if err := d.sendSymlink(filepath.Join(layerID, legacyLayerFileName), filepath.Join("..", physicalLayerPath)); err != nil {
+			return nil, "", errors.Wrap(err, "Error creating layer symbolic link")
+		}
+
+		b := []byte("1.0")
+		if err := d.sendBytes(filepath.Join(layerID, legacyVersionFileName), b); err != nil {
+			return nil, "", errors.Wrap(err, "Error writing VERSION file")
+		}
+
+		// The legacy format requires a config file per layer
+		layerConfig := make(map[string]interface{})
+		layerConfig["id"] = layerID
+
+		// The root layer doesn't have any parent
+		if lastLayerID != "" {
+			layerConfig["parent"] = lastLayerID
+		}
+		// The top layer configuration file is generated using a subset of the image configuration
+		if i == len(layerDescriptors)-1 {
+			var config map[string]*json.RawMessage
+			err := json.Unmarshal(d.config, &config)
+			if err != nil {
+				return nil, "", errors.Wrap(err, "Error unmarshaling config")
+			}
+			for _, attr := range [7]string{"architecture", "config", "container", "container_config", "created", "docker_version", "os"} {
+				layerConfig[attr] = config[attr]
+			}
+		}
+		b, err := json.Marshal(layerConfig)
+		if err != nil {
+			return nil, "", errors.Wrap(err, "Error marshaling layer config")
+		}
+		if err := d.sendBytes(filepath.Join(layerID, legacyConfigFileName), b); err != nil {
+			return nil, "", errors.Wrap(err, "Error writing config json file")
+		}
+
+		lastLayerID = layerID
+	}
+	return layerPaths, lastLayerID, nil
+}
+
+type tarFI struct {
+	path      string
+	size      int64
+	isSymlink bool
+}
+
+func (t *tarFI) Name() string {
+	return t.path
+}
+func (t *tarFI) Size() int64 {
+	return t.size
+}
+func (t *tarFI) Mode() os.FileMode {
+	if t.isSymlink {
+		return os.ModeSymlink
+	}
+	return 0444
+}
+func (t *tarFI) ModTime() time.Time {
+	return time.Unix(0, 0)
+}
+func (t *tarFI) IsDir() bool {
+	return false
+}
+func (t *tarFI) Sys() interface{} {
+	return nil
+}
+
+// sendSymlink sends a symlink into the tar stream.
+func (d *Destination) sendSymlink(path string, target string) error {
+	hdr, err := tar.FileInfoHeader(&tarFI{path: path, size: 0, isSymlink: true}, target)
+	if err != nil {
+		return err // do not swallow the error from tar.FileInfoHeader
+	}
+	logrus.Debugf("Sending as tar link %s -> %s", path, target)
+	return d.tar.WriteHeader(hdr)
+}
+
+// sendBytes sends b as a file at the given path into the tar stream.
+func (d *Destination) sendBytes(path string, b []byte) error {
+	return d.sendFile(path, int64(len(b)), bytes.NewReader(b))
+}
+
+// sendFile sends a file into the tar stream.
+func (d *Destination) sendFile(path string, expectedSize int64, stream io.Reader) error {
+	hdr, err := tar.FileInfoHeader(&tarFI{path: path, size: expectedSize}, "")
+	if err != nil {
+		return err // do not swallow the error from tar.FileInfoHeader
+	}
+	logrus.Debugf("Sending as tar file %s", path)
+	if err := d.tar.WriteHeader(hdr); err != nil {
+		return err
+	}
+	// TODO: This can take quite some time, and should ideally be cancellable using a context.Context.
+	size, err := io.Copy(d.tar, stream)
+	if err != nil {
+		return err
+	}
+	if size != expectedSize {
+		return errors.Errorf("Size mismatch when copying %s, expected %d, got %d", path, expectedSize, size)
+	}
+	return nil
+}
+
+// PutSignatures adds the given signatures to the docker tarfile (currently not
+// supported).
MUST be called after PutManifest (signatures reference manifest +// contents) +func (d *Destination) PutSignatures(ctx context.Context, signatures [][]byte) error { + if len(signatures) != 0 { + return errors.Errorf("Storing signatures for docker tar files is not supported") + } + return nil +} + +// Commit finishes writing data to the underlying io.Writer. +// It is the caller's responsibility to close it, if necessary. +func (d *Destination) Commit(ctx context.Context) error { + return d.tar.Close() +} diff --git a/vendor/github.com/containers/image/docker/tarfile/doc.go b/vendor/github.com/containers/image/docker/tarfile/doc.go new file mode 100644 index 0000000..4ea5369 --- /dev/null +++ b/vendor/github.com/containers/image/docker/tarfile/doc.go @@ -0,0 +1,3 @@ +// Package tarfile is an internal implementation detail of some transports. +// Do not use outside of the github.com/containers/image repo! +package tarfile diff --git a/vendor/github.com/containers/image/docker/tarfile/src.go b/vendor/github.com/containers/image/docker/tarfile/src.go new file mode 100644 index 0000000..03735f8 --- /dev/null +++ b/vendor/github.com/containers/image/docker/tarfile/src.go @@ -0,0 +1,475 @@ +package tarfile + +import ( + "archive/tar" + "bytes" + "context" + "encoding/json" + "io" + "io/ioutil" + "os" + "path" + "sync" + + "github.com/containers/image/internal/tmpdir" + "github.com/containers/image/manifest" + "github.com/containers/image/pkg/compression" + "github.com/containers/image/types" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +// Source is a partial implementation of types.ImageSource for reading from tarPath. +type Source struct { + tarPath string + removeTarPathOnClose bool // Remove temp file on close if true + cacheDataLock sync.Once // Atomic way to ensure that ensureCachedDataIsPresent is only invoked once + // The following data is only available after ensureCachedDataIsPresent() succeeds + cacheDataResult error // The return value of ensureCachedDataIsPresent, since it should be as safe to cache as the side effects + tarManifest *ManifestItem // nil if not available yet. + configBytes []byte + configDigest digest.Digest + orderedDiffIDList []digest.Digest + knownLayers map[digest.Digest]*layerInfo + // Other state + generatedManifest []byte // Private cache for GetManifest(), nil if not set yet. +} + +type layerInfo struct { + path string + size int64 +} + +// TODO: We could add support for multiple images in a single archive, so +// that people could use docker-archive:opensuse.tar:opensuse:leap as +// the source of an image. +// To do for both the NewSourceFromFile and NewSourceFromStream functions + +// NewSourceFromFile returns a tarfile.Source for the specified path. +func NewSourceFromFile(path string) (*Source, error) { + file, err := os.Open(path) + if err != nil { + return nil, errors.Wrapf(err, "error opening file %q", path) + } + defer file.Close() + + // If the file is already not compressed we can just return the file itself + // as a source. Otherwise we pass the stream to NewSourceFromStream. + stream, isCompressed, err := compression.AutoDecompress(file) + if err != nil { + return nil, errors.Wrapf(err, "Error detecting compression for file %q", path) + } + defer stream.Close() + if !isCompressed { + return &Source{ + tarPath: path, + }, nil + } + return NewSourceFromStream(stream) +} + +// NewSourceFromStream returns a tarfile.Source for the specified inputStream, +// which can be either compressed or uncompressed. 
The caller can close the
+// inputStream immediately after NewSourceFromStream returns.
+func NewSourceFromStream(inputStream io.Reader) (*Source, error) {
+	// FIXME: use SystemContext here.
+	// Save inputStream to a temporary file
+	tarCopyFile, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(), "docker-tar")
+	if err != nil {
+		return nil, errors.Wrap(err, "error creating temporary file")
+	}
+	defer tarCopyFile.Close()
+
+	succeeded := false
+	defer func() {
+		if !succeeded {
+			os.Remove(tarCopyFile.Name())
+		}
+	}()
+
+	// In order to be compatible with docker-load, we need to support
+	// auto-decompression (it's also a nice quality-of-life thing to avoid
+	// giving users really confusing "invalid tar header" errors).
+	uncompressedStream, _, err := compression.AutoDecompress(inputStream)
+	if err != nil {
+		return nil, errors.Wrap(err, "Error auto-decompressing input")
+	}
+	defer uncompressedStream.Close()
+
+	// Copy the plain archive to the temporary file.
+	//
+	// TODO: This can take quite some time, and should ideally be cancellable
+	// using a context.Context.
+	if _, err := io.Copy(tarCopyFile, uncompressedStream); err != nil {
+		return nil, errors.Wrapf(err, "error copying contents to temporary file %q", tarCopyFile.Name())
+	}
+	succeeded = true
+
+	return &Source{
+		tarPath:              tarCopyFile.Name(),
+		removeTarPathOnClose: true,
+	}, nil
+}
+
+// tarReadCloser is a way to close the backing file of a tar.Reader when the user no longer needs the tar component.
+type tarReadCloser struct {
+	*tar.Reader
+	backingFile *os.File
+}
+
+func (t *tarReadCloser) Close() error {
+	return t.backingFile.Close()
+}
+
+// openTarComponent returns a ReadCloser for the specific file within the archive.
+// This is a linear scan; we assume that the tar file will have a fairly small number of files (~layers),
+// and that filesystem caching will make the repeated seeking over the (uncompressed) tarPath cheap enough.
+// The caller should call .Close() on the returned stream.
+func (s *Source) openTarComponent(componentPath string) (io.ReadCloser, error) {
+	f, err := os.Open(s.tarPath)
+	if err != nil {
+		return nil, err
+	}
+	succeeded := false
+	defer func() {
+		if !succeeded {
+			f.Close()
+		}
+	}()
+
+	tarReader, header, err := findTarComponent(f, componentPath)
+	if err != nil {
+		return nil, err
+	}
+	if header == nil {
+		return nil, os.ErrNotExist
+	}
+	if header.FileInfo().Mode()&os.ModeType == os.ModeSymlink { // FIXME: untested
+		// We follow only one symlink; so no loops are possible.
+		if _, err := f.Seek(0, os.SEEK_SET); err != nil {
+			return nil, err
+		}
+		// The new path could easily point "outside" the archive, but we only compare it to existing tar headers without extracting the archive,
+		// so we don't care.
+		tarReader, header, err = findTarComponent(f, path.Join(path.Dir(componentPath), header.Linkname))
+		if err != nil {
+			return nil, err
+		}
+		if header == nil {
+			return nil, os.ErrNotExist
+		}
+	}
+
+	if !header.FileInfo().Mode().IsRegular() {
+		return nil, errors.Errorf("Error reading tar archive component %s: not a regular file", header.Name)
+	}
+	succeeded = true
+	return &tarReadCloser{Reader: tarReader, backingFile: f}, nil
+}
+
+// findTarComponent returns a header and a reader matching path within inputFile,
+// or (nil, nil, nil) if not found.
+func findTarComponent(inputFile io.Reader, path string) (*tar.Reader, *tar.Header, error) { + t := tar.NewReader(inputFile) + for { + h, err := t.Next() + if err == io.EOF { + break + } + if err != nil { + return nil, nil, err + } + if h.Name == path { + return t, h, nil + } + } + return nil, nil, nil +} + +// readTarComponent returns full contents of componentPath. +func (s *Source) readTarComponent(path string) ([]byte, error) { + file, err := s.openTarComponent(path) + if err != nil { + return nil, errors.Wrapf(err, "Error loading tar component %s", path) + } + defer file.Close() + bytes, err := ioutil.ReadAll(file) + if err != nil { + return nil, err + } + return bytes, nil +} + +// ensureCachedDataIsPresent loads data necessary for any of the public accessors. +func (s *Source) ensureCachedDataIsPresent() error { + s.cacheDataLock.Do(func() { + // Read and parse manifest.json + tarManifest, err := s.loadTarManifest() + if err != nil { + s.cacheDataResult = err + return + } + + // Check to make sure length is 1 + if len(tarManifest) != 1 { + s.cacheDataResult = errors.Errorf("Unexpected tar manifest.json: expected 1 item, got %d", len(tarManifest)) + return + } + + // Read and parse config. + configBytes, err := s.readTarComponent(tarManifest[0].Config) + if err != nil { + s.cacheDataResult = err + return + } + var parsedConfig manifest.Schema2Image // There's a lot of info there, but we only really care about layer DiffIDs. + if err := json.Unmarshal(configBytes, &parsedConfig); err != nil { + s.cacheDataResult = errors.Wrapf(err, "Error decoding tar config %s", tarManifest[0].Config) + return + } + + knownLayers, err := s.prepareLayerData(&tarManifest[0], &parsedConfig) + if err != nil { + s.cacheDataResult = err + return + } + + // Success; commit. + s.tarManifest = &tarManifest[0] + s.configBytes = configBytes + s.configDigest = digest.FromBytes(configBytes) + s.orderedDiffIDList = parsedConfig.RootFS.DiffIDs + s.knownLayers = knownLayers + }) + return s.cacheDataResult +} + +// loadTarManifest loads and decodes the manifest.json. +func (s *Source) loadTarManifest() ([]ManifestItem, error) { + // FIXME? Do we need to deal with the legacy format? + bytes, err := s.readTarComponent(manifestFileName) + if err != nil { + return nil, err + } + var items []ManifestItem + if err := json.Unmarshal(bytes, &items); err != nil { + return nil, errors.Wrap(err, "Error decoding tar manifest.json") + } + return items, nil +} + +// Close removes resources associated with an initialized Source, if any. +func (s *Source) Close() error { + if s.removeTarPathOnClose { + return os.Remove(s.tarPath) + } + return nil +} + +// LoadTarManifest loads and decodes the manifest.json +func (s *Source) LoadTarManifest() ([]ManifestItem, error) { + return s.loadTarManifest() +} + +func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manifest.Schema2Image) (map[digest.Digest]*layerInfo, error) { + // Collect layer data available in manifest and config. + if len(tarManifest.Layers) != len(parsedConfig.RootFS.DiffIDs) { + return nil, errors.Errorf("Inconsistent layer count: %d in manifest, %d in config", len(tarManifest.Layers), len(parsedConfig.RootFS.DiffIDs)) + } + knownLayers := map[digest.Digest]*layerInfo{} + unknownLayerSizes := map[string]*layerInfo{} // Points into knownLayers, a "to do list" of items with unknown sizes. 
+ for i, diffID := range parsedConfig.RootFS.DiffIDs { + if _, ok := knownLayers[diffID]; ok { + // Apparently it really can happen that a single image contains the same layer diff more than once. + // In that case, the diffID validation ensures that both layers truly are the same, and it should not matter + // which of the tarManifest.Layers paths is used; (docker save) actually makes the duplicates symlinks to the original. + continue + } + layerPath := tarManifest.Layers[i] + if _, ok := unknownLayerSizes[layerPath]; ok { + return nil, errors.Errorf("Layer tarfile %s used for two different DiffID values", layerPath) + } + li := &layerInfo{ // A new element in each iteration + path: layerPath, + size: -1, + } + knownLayers[diffID] = li + unknownLayerSizes[layerPath] = li + } + + // Scan the tar file to collect layer sizes. + file, err := os.Open(s.tarPath) + if err != nil { + return nil, err + } + defer file.Close() + t := tar.NewReader(file) + for { + h, err := t.Next() + if err == io.EOF { + break + } + if err != nil { + return nil, err + } + if li, ok := unknownLayerSizes[h.Name]; ok { + // Since GetBlob will decompress layers that are compressed we need + // to do the decompression here as well, otherwise we will + // incorrectly report the size. Pretty critical, since tools like + // umoci always compress layer blobs. Obviously we only bother with + // the slower method of checking if it's compressed. + uncompressedStream, isCompressed, err := compression.AutoDecompress(t) + if err != nil { + return nil, errors.Wrapf(err, "Error auto-decompressing %s to determine its size", h.Name) + } + defer uncompressedStream.Close() + + uncompressedSize := h.Size + if isCompressed { + uncompressedSize, err = io.Copy(ioutil.Discard, uncompressedStream) + if err != nil { + return nil, errors.Wrapf(err, "Error reading %s to find its size", h.Name) + } + } + li.size = uncompressedSize + delete(unknownLayerSizes, h.Name) + } + } + if len(unknownLayerSizes) != 0 { + return nil, errors.Errorf("Some layer tarfiles are missing in the tarball") // This could do with a better error reporting, if this ever happened in practice. + } + + return knownLayers, nil +} + +// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). +// It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); +// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). +func (s *Source) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { + if instanceDigest != nil { + // How did we even get here? GetManifest(ctx, nil) has returned a manifest.DockerV2Schema2MediaType. 
+		return nil, "", errors.Errorf(`Manifest lists are not supported by "docker-daemon:"`)
+	}
+	if s.generatedManifest == nil {
+		if err := s.ensureCachedDataIsPresent(); err != nil {
+			return nil, "", err
+		}
+		m := manifest.Schema2{
+			SchemaVersion: 2,
+			MediaType:     manifest.DockerV2Schema2MediaType,
+			ConfigDescriptor: manifest.Schema2Descriptor{
+				MediaType: manifest.DockerV2Schema2ConfigMediaType,
+				Size:      int64(len(s.configBytes)),
+				Digest:    s.configDigest,
+			},
+			LayersDescriptors: []manifest.Schema2Descriptor{},
+		}
+		for _, diffID := range s.orderedDiffIDList {
+			li, ok := s.knownLayers[diffID]
+			if !ok {
+				return nil, "", errors.Errorf("Internal inconsistency: Information about layer %s missing", diffID)
+			}
+			m.LayersDescriptors = append(m.LayersDescriptors, manifest.Schema2Descriptor{
+				Digest:    diffID, // diffID is a digest of the uncompressed tarball
+				MediaType: manifest.DockerV2Schema2LayerMediaType,
+				Size:      li.size,
+			})
+		}
+		manifestBytes, err := json.Marshal(&m)
+		if err != nil {
+			return nil, "", err
+		}
+		s.generatedManifest = manifestBytes
+	}
+	return s.generatedManifest, manifest.DockerV2Schema2MediaType, nil
+}
+
+// uncompressedReadCloser is an io.ReadCloser that closes both the uncompressed stream and the underlying input.
+type uncompressedReadCloser struct {
+	io.Reader
+	underlyingCloser   func() error
+	uncompressedCloser func() error
+}
+
+func (r uncompressedReadCloser) Close() error {
+	var res error
+	if err := r.uncompressedCloser(); err != nil {
+		res = err
+	}
+	if err := r.underlyingCloser(); err != nil && res == nil {
+		res = err
+	}
+	return res
+}
+
+// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
+func (s *Source) HasThreadSafeGetBlob() bool {
+	return true
+}
+
+// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (s *Source) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
+	if err := s.ensureCachedDataIsPresent(); err != nil {
+		return nil, 0, err
+	}
+
+	if info.Digest == s.configDigest { // FIXME? Implement a more general algorithm matching instead of assuming sha256.
+		return ioutil.NopCloser(bytes.NewReader(s.configBytes)), int64(len(s.configBytes)), nil
+	}
+
+	if li, ok := s.knownLayers[info.Digest]; ok { // diffID is a digest of the uncompressed tarball,
+		underlyingStream, err := s.openTarComponent(li.path)
+		if err != nil {
+			return nil, 0, err
+		}
+		closeUnderlyingStream := true
+		defer func() {
+			if closeUnderlyingStream {
+				underlyingStream.Close()
+			}
+		}()
+
+		// In order to handle the fact that digests != diffIDs (and thus that a
+		// caller which is trying to verify the blob will run into problems),
+		// we need to decompress blobs. This is a bit ugly, but it's a
+		// consequence of making everything addressable by their DiffID rather
+		// than by their digest...
+		//
+		// In particular, because the v2s2 manifest being generated uses
+		// DiffIDs, any caller of GetBlob is going to be asking for DiffIDs of
+		// layers not their _actual_ digest. The result is that copy/... will
+		// be verifying a "digest" which is not the actual layer's digest (but
+		// is instead the DiffID).
+
+		uncompressedStream, _, err := compression.AutoDecompress(underlyingStream)
+		if err != nil {
+			return nil, 0, errors.Wrapf(err, "Error auto-decompressing blob %s", info.Digest)
+		}
+
+		newStream := uncompressedReadCloser{
+			Reader:             uncompressedStream,
+			underlyingCloser:   underlyingStream.Close,
+			uncompressedCloser: uncompressedStream.Close,
+		}
+		closeUnderlyingStream = false
+
+		return newStream, li.size, nil
+	}
+
+	return nil, 0, errors.Errorf("Unknown blob %s", info.Digest)
+}
+
+// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+func (s *Source) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
+	if instanceDigest != nil {
+		// How did we even get here? GetManifest(ctx, nil) has returned a manifest.DockerV2Schema2MediaType.
+		return nil, errors.Errorf(`Manifest lists are not supported by "docker-daemon:"`)
+	}
+	return [][]byte{}, nil
+}
diff --git a/vendor/github.com/containers/image/docker/tarfile/types.go b/vendor/github.com/containers/image/docker/tarfile/types.go
new file mode 100644
index 0000000..e81d939
--- /dev/null
+++ b/vendor/github.com/containers/image/docker/tarfile/types.go
@@ -0,0 +1,28 @@
+package tarfile
+
+import (
+	"github.com/containers/image/manifest"
+	"github.com/opencontainers/go-digest"
+)
+
+// Various data structures.
+
+// Based on github.com/docker/docker/image/tarexport/tarexport.go
+const (
+	manifestFileName           = "manifest.json"
+	legacyLayerFileName        = "layer.tar"
+	legacyConfigFileName       = "json"
+	legacyVersionFileName      = "VERSION"
+	legacyRepositoriesFileName = "repositories"
+)
+
+// ManifestItem is an element of the array stored in the top-level manifest.json file.
+type ManifestItem struct {
+	Config       string
+	RepoTags     []string
+	Layers       []string
+	Parent       imageID                                       `json:",omitempty"`
+	LayerSources map[digest.Digest]manifest.Schema2Descriptor `json:",omitempty"`
+}
+
+type imageID string
diff --git a/vendor/github.com/containers/image/docker/wwwauthenticate.go b/vendor/github.com/containers/image/docker/wwwauthenticate.go
new file mode 100644
index 0000000..23664a7
--- /dev/null
+++ b/vendor/github.com/containers/image/docker/wwwauthenticate.go
@@ -0,0 +1,159 @@
+package docker
+
+// Based on github.com/docker/distribution/registry/client/auth/authchallenge.go, primarily stripping unnecessary dependencies.
+
+import (
+	"net/http"
+	"strings"
+)
+
+// challenge carries information from a WWW-Authenticate response header.
+// See RFC 7235.
+type challenge struct {
+	// Scheme is the auth-scheme according to RFC 7235
+	Scheme string
+
+	// Parameters are the auth-params according to RFC 7235
+	Parameters map[string]string
+}
+
+// Octet types from RFC 7230.
+type octetType byte
+
+var octetTypes [256]octetType
+
+const (
+	isToken octetType = 1 << iota
+	isSpace
+)
+
+func init() {
+	// OCTET      = <any 8-bit sequence of data>
+	// CHAR       = <any US-ASCII character (octets 0 - 127)>
+	// CTL        = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
+	// CR         = <US-ASCII CR, carriage return (13)>
+	// LF         = <US-ASCII LF, linefeed (10)>
+	// SP         = <US-ASCII SP, space (32)>
+	// HT         = <US-ASCII HT, horizontal-tab (9)>
+	// <">        = <US-ASCII double-quote mark (34)>
+	// CRLF       = CR LF
+	// LWS        = [CRLF] 1*( SP | HT )
+	// TEXT       = <any OCTET except CTLs, but including LWS>
+	// separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
+	//              | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
+	// token      = 1*<any CHAR except CTLs or separators>
+	// qdtext     = <any TEXT except <">>
+
+	for c := 0; c < 256; c++ {
+		var t octetType
+		isCtl := c <= 31 || c == 127
+		isChar := 0 <= c && c <= 127
+		isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0
+		if strings.IndexRune(" \t\r\n", rune(c)) >= 0 {
+			t |= isSpace
+		}
+		if isChar && !isCtl && !isSeparator {
+			t |= isToken
+		}
+		octetTypes[c] = t
+	}
+}
+
+func parseAuthHeader(header http.Header) []challenge {
+	challenges := []challenge{}
+	for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] {
+		v, p := parseValueAndParams(h)
+		if v != "" {
+			challenges = append(challenges, challenge{Scheme: v, Parameters: p})
+		}
+	}
+	return challenges
+}
+
+// NOTE: This is not a fully compliant parser per RFC 7235:
+// Most notably it does not support more than one challenge within a single header
+// Some of the whitespace parsing also seems noncompliant.
+// But it is clearly better than what we used to have…
+func parseValueAndParams(header string) (value string, params map[string]string) {
+	params = make(map[string]string)
+	value, s := expectToken(header)
+	if value == "" {
+		return
+	}
+	value = strings.ToLower(value)
+	s = "," + skipSpace(s)
+	for strings.HasPrefix(s, ",") {
+		var pkey string
+		pkey, s = expectToken(skipSpace(s[1:]))
+		if pkey == "" {
+			return
+		}
+		if !strings.HasPrefix(s, "=") {
+			return
+		}
+		var pvalue string
+		pvalue, s = expectTokenOrQuoted(s[1:])
+		if pvalue == "" {
+			return
+		}
+		pkey = strings.ToLower(pkey)
+		params[pkey] = pvalue
+		s = skipSpace(s)
+	}
+	return
+}
+
+func skipSpace(s string) (rest string) {
+	i := 0
+	for ; i < len(s); i++ {
+		if octetTypes[s[i]]&isSpace == 0 {
+			break
+		}
+	}
+	return s[i:]
+}
+
+func expectToken(s string) (token, rest string) {
+	i := 0
+	for ; i < len(s); i++ {
+		if octetTypes[s[i]]&isToken == 0 {
+			break
+		}
+	}
+	return s[:i], s[i:]
+}
+
+func expectTokenOrQuoted(s string) (value string, rest string) {
+	if !strings.HasPrefix(s, "\"") {
+		return expectToken(s)
+	}
+	s = s[1:]
+	for i := 0; i < len(s); i++ {
+		switch s[i] {
+		case '"':
+			return s[:i], s[i+1:]
+		case '\\':
+			p := make([]byte, len(s)-1)
+			j := copy(p, s[:i])
+			escape := true
+			for i = i + 1; i < len(s); i++ {
+				b := s[i]
+				switch {
+				case escape:
+					escape = false
+					p[j] = b
+					j++
+				case b == '\\':
+					escape = true
+				case b == '"':
+					return string(p[:j]), s[i+1:]
+				default:
+					p[j] = b
+					j++
+				}
+			}
+			return "", ""
+		}
+	}
+	return "", ""
+}
diff --git a/vendor/github.com/containers/image/docs/atomic-signature-embedded-json.json b/vendor/github.com/containers/image/docs/atomic-signature-embedded-json.json
new file mode 100644
index 0000000..ccb4eda
--- /dev/null
+++ b/vendor/github.com/containers/image/docs/atomic-signature-embedded-json.json
@@ -0,0 +1,66 @@
+{
+  "title": "JSON embedded in an atomic container signature",
+  "description": "This schema is a supplement to atomic-signature.md in this directory.\n\nConsumers of the JSON MUST use the processing rules documented in atomic-signature.md, especially the requirements for the 'critical' subobject.\n\nWhenever this schema and atomic-signature.md, or the github.com/containers/image/signature implementation, differ,\nit is the atomic-signature.md document, or the github.com/containers/image/signature implementation, which governs.\n\nUsers are STRONGLY RECOMMENDED to use the github.com/containers/image/signature implementation instead of writing\ntheir own, ESPECIALLY when consuming signatures, so that the policy.json format can be shared by all image
consumers.\n",
+  "type": "object",
+  "required": [
+    "critical",
+    "optional"
+  ],
+  "additionalProperties": false,
+  "properties": {
+    "critical": {
+      "type": "object",
+      "required": [
+        "type",
+        "image",
+        "identity"
+      ],
+      "additionalProperties": false,
+      "properties": {
+        "type": {
+          "type": "string",
+          "enum": [
+            "atomic container signature"
+          ]
+        },
+        "image": {
+          "type": "object",
+          "required": [
+            "docker-manifest-digest"
+          ],
+          "additionalProperties": false,
+          "properties": {
+            "docker-manifest-digest": {
+              "type": "string"
+            }
+          }
+        },
+        "identity": {
+          "type": "object",
+          "required": [
+            "docker-reference"
+          ],
+          "additionalProperties": false,
+          "properties": {
+            "docker-reference": {
+              "type": "string"
+            }
+          }
+        }
+      }
+    },
+    "optional": {
+      "type": "object",
+      "description": "All members are optional, but if they are included, they must be valid.",
+      "additionalProperties": true,
+      "properties": {
+        "creator": {
+          "type": "string"
+        },
+        "timestamp": {
+          "type": "integer"
+        }
+      }
+    }
+  }
+}
\ No newline at end of file
diff --git a/vendor/github.com/containers/image/docs/containers-certs.d.5.md b/vendor/github.com/containers/image/docs/containers-certs.d.5.md
new file mode 100644
index 0000000..ffd7e4b
--- /dev/null
+++ b/vendor/github.com/containers/image/docs/containers-certs.d.5.md
@@ -0,0 +1,28 @@
+% containers-certs.d(5)
+
+# NAME
+containers-certs.d - Directory for storing custom container-registry TLS configurations
+
+# DESCRIPTION
+A custom TLS configuration for a container registry can be configured by creating a directory under `/etc/containers/certs.d`.
+The name of the directory must correspond to the `host:port` of the registry (e.g., `my-registry.com:5000`).
+
+## Directory Structure
+A certs directory can contain one or more files with the following extensions:
+
+* `*.crt` files with this extension will be interpreted as CA certificates
+* `*.cert` files with this extension will be interpreted as client certificates
+* `*.key` files with this extension will be interpreted as client keys
+
+Note that the client certificate-key pair will be selected by the file name (e.g., `client.{cert,key}`).
+An example setup for a registry running at `my-registry.com:5000` may look as follows:
+```
+/etc/containers/certs.d/    <- Certificate directory
+└── my-registry.com:5000    <- Hostname:port
+    ├── client.cert         <- Client certificate
+    ├── client.key          <- Client key
+    └── ca.crt              <- Certificate authority that signed the registry certificate
+```
+
+# HISTORY
+Feb 2019, Originally compiled by Valentin Rothberg
diff --git a/vendor/github.com/containers/image/docs/containers-policy.json.5.md b/vendor/github.com/containers/image/docs/containers-policy.json.5.md
new file mode 100644
index 0000000..2859d81
--- /dev/null
+++ b/vendor/github.com/containers/image/docs/containers-policy.json.5.md
@@ -0,0 +1,283 @@
+% CONTAINERS-POLICY.JSON(5) policy.json Man Page
+% Miloslav Trmač
+% September 2016
+
+# NAME
+containers-policy.json - syntax for the signature verification policy file
+
+## DESCRIPTION
+
+Signature verification policy files are used to specify policy, e.g. trusted keys,
+applicable when deciding whether to accept an image, or individual signatures of that image, as valid.
+
+The default policy is stored (unless overridden at compile-time) at `/etc/containers/policy.json`;
+applications performing verification may allow using a different policy instead.
+
+## FORMAT
+
+The signature verification policy file, usually called `policy.json`,
+uses a JSON format.
Unlike some other JSON files, its parsing is fairly strict:
+unrecognized, duplicated or otherwise invalid fields cause the entire file,
+and usually the entire operation, to be rejected.
+
+The purpose of the policy file is to define a set of *policy requirements* for a container image,
+usually depending on its location (where it is being pulled from) or otherwise defined identity.
+
+Policy requirements can be defined for:
+
+- An individual *scope* in a *transport*.
+  The *transport* values are the same as the transport prefixes when pushing/pulling images (e.g. `docker:`, `atomic:`),
+  and *scope* values are defined by each transport; see below for more details.
+
+  Usually, a scope can be defined to match a single image, and various prefixes of
+  such a most specific scope define namespaces of matching images.
+- A default policy for a single transport, expressed using an empty string as a scope
+- A global default policy.
+
+If multiple policy requirements match a given image, only the requirements from the most specific match apply;
+the more general policy requirement definitions are ignored.
+
+This is expressed in JSON using the top-level syntax
+```js
+{
+    "default": [/* policy requirements: global default */],
+    "transports": {
+        transport_name: {
+            "": [/* policy requirements: default for transport $transport_name */],
+            scope_1: [/* policy requirements: default for $scope_1 in $transport_name */],
+            scope_2: [/*…*/]
+            /*…*/
+        },
+        transport_name_2: {/*…*/}
+        /*…*/
+    }
+}
+```
+
+The global `default` set of policy requirements is mandatory; all of the other fields
+(`transports` itself, any specific transport, the transport-specific default, etc.) are optional.
+
+
+## Supported transports and their scopes
+
+### `atomic:`
+
+The `atomic:` transport refers to images in an Atomic Registry.
+
+Supported scopes use the form _hostname_[`:`_port_][`/`_namespace_[`/`_imagestream_ [`:`_tag_]]],
+i.e. either specifying a complete name of a tagged image, or a prefix denoting
+a host/namespace/image stream.
+
+*Note:* The _hostname_ and _port_ refer to the Docker registry host and port (the one used
+e.g. for `docker pull`), _not_ to the OpenShift API host and port.
+
+### `dir:`
+
+The `dir:` transport refers to images stored in local directories.
+
+Supported scopes are paths of directories (either containing a single image or
+subdirectories possibly containing images).
+
+*Note:* The paths must be absolute and contain no symlinks. Paths violating these requirements may be silently ignored.
+
+The top-level scope `"/"` is forbidden; use the transport default scope `""`,
+for consistency with other transports.
+
+### `docker:`
+
+The `docker:` transport refers to images in a registry implementing the "Docker Registry HTTP API V2".
+
+Scopes matching individual images are named Docker references *in the fully expanded form*, either
+using a tag or digest. For example, `docker.io/library/busybox:latest` (*not* `busybox:latest`).
+
+More general scopes are prefixes of individual-image scopes, and specify a repository (by omitting the tag or digest),
+a repository namespace, or a registry host (by only specifying the host name).
+
+### `oci:`
+
+The `oci:` transport refers to images in directories compliant with "Open Container Image Layout Specification".
+
+Supported scopes use the form _directory_`:`_tag_, with _directory_ referring to
+a directory containing one or more tags, or any of the parent directories.
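+
+For example, with a hypothetical layout directory `/srv/oci/busybox`, the scope
+`/srv/oci/busybox:latest` would match only the image tagged `latest` in that directory,
+while the scopes `/srv/oci/busybox` and `/srv/oci` would match any image stored beneath those paths.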
+ +*Note:* See `dir:` above for semantics and restrictions on the directory paths; they apply to `oci:` equivalently. + +### `tarball:` + +The `tarball:` transport refers to tarred-up container root filesystems. + +Scopes are ignored. + +## Policy Requirements + +Using the mechanisms above, a set of policy requirements is looked up. The policy requirements +are represented as a JSON array of individual requirement objects. For an image to be accepted, +*all* of the requirements must be satisfied simultaneously. + +The policy requirements can also be used to decide whether an individual signature is accepted (= is signed by a recognized key of a known author); +in that case some requirements may apply only to some signatures, but each signature must be accepted by *at least one* requirement object. + +The following requirement objects are supported: + +### `insecureAcceptAnything` + +A simple requirement with the following syntax: + +```json +{"type":"insecureAcceptAnything"} +``` + +This requirement accepts any image (but note that other requirements in the array still apply). + +When deciding to accept an individual signature, this requirement does not have any effect; it does *not* cause the signature to be accepted, though. + +This is useful primarily for policy scopes where no signature verification is required; +because the array of policy requirements must not be empty, this requirement is used +to represent the lack of requirements explicitly. + +### `reject` + +A simple requirement with the following syntax: + +```json +{"type":"reject"} +``` + +This requirement rejects every image, and every signature. + +### `signedBy` + +This requirement accepts an image if it is signed with an expected identity, or accepts a signature if it is using an expected identity and key. + +```js +{ + "type": "signedBy", + "keyType": "GPGKeys", /* The only currently supported value */ + "keyPath": "/path/to/local/keyring/file", + "keyData": "base64-encoded-keyring-data", + "signedIdentity": identity_requirement +} +``` + + +Exactly one of `keyPath` and `keyData` must be present, containing a GPG keyring of one or more public keys. Only signatures made by these keys are accepted. + +The `signedIdentity` field, a JSON object, specifies what image identity the signature claims about the image. +One of the following alternatives is supported: + +- The identity in the signature must exactly match the image identity. Note that with this, referencing an image by digest (with a signature claiming a _repository_`:`_tag_ identity) will fail. + + ```json + {"type":"matchExact"} + ``` +- If the image identity carries a tag, the identity in the signature must exactly match; + if the image identity uses a digest reference, the identity in the signature must be in the same repository as the image identity (using any tag). + + (Note that with images identified using digest references, the digest from the reference is validated even before signature verification starts.) + + ```json + {"type":"matchRepoDigestOrExact"} + ``` +- The identity in the signature must be in the same repository as the image identity. This is useful e.g. to pull an image using the `:latest` tag when the image is signed with a tag specifying an exact image version. + + ```json + {"type":"matchRepository"} + ``` +- The identity in the signature must exactly match a specified identity. + This is useful e.g. when locally mirroring images signed using their public identity.
+ + ```js + { + "type": "exactReference", + "dockerReference": docker_reference_value + } + ``` +- The identity in the signature must be in the same repository as a specified identity. + This combines the properties of `matchRepository` and `exactReference`. + + ```js + { + "type": "exactRepository", + "dockerRepository": docker_repository_value + } + ``` + +If the `signedIdentity` field is missing, it is treated as `matchRepoDigestOrExact`. + +*Note*: `matchExact`, `matchRepoDigestOrExact` and `matchRepository` can only be used if a Docker-like image identity is +provided by the transport. In particular, the `dir:` and `oci:` transports can only be +used with `exactReference` or `exactRepository`. + + + +## Examples + +It is *strongly* recommended to set the `default` policy to `reject`, and then +selectively allow individual transports and scopes as desired. + +### A reasonably locked-down system + +(Note that the `/*`…`*/` comments are not valid in JSON, and must not be used in real policies.) + +```js +{ + "default": [{"type": "reject"}], /* Reject anything not explicitly allowed */ + "transports": { + "docker": { + /* Allow installing images from a specific repository namespace, without cryptographic verification. + This namespace includes images like openshift/hello-openshift and openshift/origin. */ + "docker.io/openshift": [{"type": "insecureAcceptAnything"}], + /* Similarly, allow installing the “official” busybox images. Note how the fully expanded + form, with the explicit /library/, must be used. */ + "docker.io/library/busybox": [{"type": "insecureAcceptAnything"}] + /* Other docker: images use the global default policy and are rejected */ + }, + "dir": { + "": [{"type": "insecureAcceptAnything"}] /* Allow any images originating in local directories */ + }, + "atomic": { + /* The common case: using a known key for a repository or set of repositories */ + "hostname:5000/myns/official": [ + { + "type": "signedBy", + "keyType": "GPGKeys", + "keyPath": "/path/to/official-pubkey.gpg" + } + ], + /* A more complex example, for a repository which contains a mirror of a third-party product, + which must be signed-off by local IT */ + "hostname:5000/vendor/product": [ + { /* Require the image to be signed by the original vendor, using the vendor's repository location. */ + "type": "signedBy", + "keyType": "GPGKeys", + "keyPath": "/path/to/vendor-pubkey.gpg", + "signedIdentity": { + "type": "exactRepository", + "dockerRepository": "vendor-hostname/product/repository" + } + }, + { /* Require the image to _also_ be signed by a local reviewer.
*/ + "type": "signedBy", + "keyType": "GPGKeys", + "keyPath": "/path/to/reviewer-pubkey.gpg" + } + ] + } + } +} +``` + +### Completely disable security, allow all images, do not trust any signatures + +```json +{ + "default": [{"type": "insecureAcceptAnything"}] +} +``` +## SEE ALSO + atomic(1) + +## HISTORY +August 2018, Rename to containers-policy.json(5) by Valentin Rothberg + +September 2016, Originally compiled by Miloslav Trmač diff --git a/vendor/github.com/containers/image/docs/containers-registries.conf.5.md b/vendor/github.com/containers/image/docs/containers-registries.conf.5.md new file mode 100644 index 0000000..6b9abff --- /dev/null +++ b/vendor/github.com/containers/image/docs/containers-registries.conf.5.md @@ -0,0 +1,177 @@ +% CONTAINERS-REGISTRIES.CONF(5) System-wide registry configuration file +% Brent Baude +% Aug 2017 + +# NAME +containers-registries.conf - Syntax of System Registry Configuration File + +# DESCRIPTION +The CONTAINERS-REGISTRIES.CONF file is a system-wide configuration +file for container image registries. The file format is TOML. + +By default, the configuration file is located at `/etc/containers/registries.conf`. + +# FORMATS + +## VERSION 2 +VERSION 2 is the latest format of `registries.conf` and is currently in +beta. This means that, in general, VERSION 1 should still be used in production environments +for now. + +### GLOBAL SETTINGS + +`unqualified-search-registries` +: An array of _host_[`:`_port_] registries to try when pulling an unqualified image, in order. + +### NAMESPACED `[[registry]]` SETTINGS + +The bulk of the configuration is represented as an array of `[[registry]]` +TOML tables; the settings may therefore differ among different registries +as well as among different namespaces/repositories within a registry. + +#### Choosing a `[[registry]]` TOML table + +Given an image name, a single `[[registry]]` TOML table is chosen based on its `prefix` field. + +`prefix` +: A prefix of the user-specified image name, i.e. using one of the following formats: + - _host_[`:`_port_] + - _host_[`:`_port_]`/`_namespace_[`/`_namespace_…] + - _host_[`:`_port_]`/`_namespace_[`/`_namespace_…]`/`_repo_ + - _host_[`:`_port_]`/`_namespace_[`/`_namespace_…]`/`_repo_(`:`_tag_|`@`_digest_) + + The user-specified image name must start with the specified `prefix` (and continue + with the appropriate separator) for a particular `[[registry]]` TOML table to be + considered; (only) the TOML table with the longest match is used. + + As a special case, the `prefix` field can be missing; if so, it defaults to the value + of the `location` field (described below). + +#### Per-namespace settings + +`insecure` +: `true` or `false`. + By default, container runtimes require TLS when retrieving images from a registry. + If `insecure` is set to `true`, unencrypted HTTP as well as TLS connections with untrusted + certificates are allowed. + +`blocked` +: `true` or `false`. + If `true`, pulling images with matching names is forbidden. + +#### Remapping and mirroring registries + +The user-specified image reference is, primarily, a "logical" image name, always used for naming +the image. By default, the image reference also directly specifies the registry and repository +to use, but the following options can be used to redirect the underlying accesses +to different registry servers or locations (e.g. to support configurations with no access to the +internet without having to change `Dockerfile`s, or to add redundancy).
+ +`location` +: Accepts the same format as the `prefix` field, and specifies the physical location + of the `prefix`-rooted namespace. + + By default, this is equal to `prefix` (in which case `prefix` can be omitted and the + `[[registry]]` TOML table can only specify `location`). + + Example: Given + ``` + prefix = "example.com/foo" + location = "internal-registry-for-example.net/bar" + ``` + requests for the image `example.com/foo/myimage:latest` will actually work with the + `internal-registry-for-example.net/bar/myimage:latest` image. + +`mirror` +: An array of TOML tables specifying (possibly-partial) mirrors for the + `prefix`-rooted namespace. + + The mirrors are attempted in the specified order; the first one that can be + contacted and contains the image will be used (and if none of the mirrors contains the image, + the primary location specified by the `registry.location` field, or using the unmodified + user-specified reference, is tried last). + + Each TOML table in the `mirror` array can contain the following fields, with the same semantics + as if specified in the `[[registry]]` TOML table directly: + - `location` + - `insecure` + +`mirror-by-digest-only` +: `true` or `false`. + If `true`, mirrors will only be used during pulling if the image reference includes a digest. + Referencing an image by digest ensures that the same image is always used + (whereas referencing an image by a tag may cause different registries to return + different images if the tag mapping is out of sync). + + Note that if this is `true`, images referenced by a tag will only use the primary + registry, failing if that registry is not accessible. + +*Note*: Redirection and mirrors are currently processed only when reading images, not when pushing +to a registry; that may change in the future. + +### EXAMPLE + +``` +unqualified-search-registries = ["example.com"] + +[[registry]] +prefix = "example.com/foo" +insecure = false +blocked = false +location = "internal-registry-for-example.com/bar" + +[[registry.mirror]] +location = "example-mirror-0.local/mirror-for-foo" + +[[registry.mirror]] +location = "example-mirror-1.local/mirrors/foo" +insecure = true +``` +Given the above, a pull of `example.com/foo/image:latest` will try: + 1. `example-mirror-0.local/mirror-for-foo/image:latest` + 2. `example-mirror-1.local/mirrors/foo/image:latest` + 3. `internal-registry-for-example.com/bar/image:latest` + +in order, and use the first one that exists. + +## VERSION 1 +VERSION 1 can be used as an alternative to VERSION 2, but it does not support +using registry mirrors, longest-prefix matches, or location rewriting. + +The TOML format is used to build a simple list of registries under three +categories: `registries.search`, `registries.insecure`, and `registries.block`. +You can list multiple registries using a comma-separated list. + +Search registries are used when the caller of a container runtime does not fully specify the +container image that they want to execute. These registries are prepended to the +specified container image name until the named image is found at a registry. + +Note that insecure registries can be used for any registry, not just the registries listed +under search. + +The `registries.insecure` and `registries.block` lists have the same meaning as the +`insecure` and `blocked` fields in VERSION 2. + +### EXAMPLE +The following example configuration defines two searchable registries, one +insecure registry, and two blocked registries.
+ +``` +[registries.search] +registries = ['registry1.com', 'registry2.com'] + +[registries.insecure] +registries = ['registry3.com'] + +[registries.block] +registries = ['registry.untrusted.com', 'registry.unsafe.com'] +``` + +# HISTORY +Mar 2019, Added additional configuration format by Sascha Grunert + +Aug 2018, Renamed to containers-registries.conf(5) by Valentin Rothberg + +Jun 2018, Updated by Tom Sweeney + +Aug 2017, Originally compiled by Brent Baude diff --git a/vendor/github.com/containers/image/docs/containers-registries.d.5.md b/vendor/github.com/containers/image/docs/containers-registries.d.5.md new file mode 100644 index 0000000..dffe387 --- /dev/null +++ b/vendor/github.com/containers/image/docs/containers-registries.d.5.md @@ -0,0 +1,128 @@ +% CONTAINERS-REGISTRIES.D(5) Registries.d Man Page +% Miloslav Trmač +% August 2016 + +# NAME +containers-registries.d - Directory for various registries configurations + +# DESCRIPTION + +The registries configuration directory contains configuration for various registries +(servers storing remote container images), and for content stored in them, +so that the configuration does not have to be provided in command-line options over and over for every command, +and so that it can be shared by all users of containers/image. + +By default (unless overridden at compile-time), the registries configuration directory is `/etc/containers/registries.d`; +applications may allow using a different directory instead. + +## Directory Structure + +The directory may contain any number of files with the extension `.yaml`, +each using the YAML format. Other than the mandatory extension, names of the files +don’t matter. + +The contents of these files are merged together; to have a well-defined and easy-to-understand +behavior, there can be only one configuration section describing a single namespace within a registry +(in particular there can be at most one `default-docker` section across all files, +and there can be at most one instance of any key under the `docker` section; +these sections are documented later). + +Thus, it is forbidden to have two conflicting configurations for a single registry or scope, +and it is also forbidden to split a configuration for a single registry or scope across +more than one file (even if they are not semantically in conflict). + +## Registries, Scopes and Search Order + +Each YAML file must contain a “YAML mapping” (key-value pairs). Two top-level keys are defined: + +- `default-docker` is the _configuration section_ (as documented below) + for registries implementing "Docker Registry HTTP API V2". + + This key is optional. + +- `docker` is a mapping, using individual registries implementing "Docker Registry HTTP API V2", + or namespaces and individual images within these registries, as keys; + the value assigned to any such key is a _configuration section_. + + This key is optional. + + Scopes matching individual images are named Docker references *in the fully expanded form*, either + using a tag or digest. For example, `docker.io/library/busybox:latest` (*not* `busybox:latest`). + + More general scopes are prefixes of individual-image scopes, and specify a repository (by omitting the tag or digest), + a repository namespace, or a registry host (and a port if it differs from the default). + + Note that if a registry is accessed using a hostname+port configuration, the port-less hostname + is _not_ used as parent scope.
+ +When searching for a configuration to apply for an individual container image, only +the configuration for the most-precisely matching scope is used; configuration using +more general scopes is ignored. For example, if _any_ configuration exists for +`docker.io/library/busybox`, the configuration for `docker.io` is ignored +(even if some element of the configuration is defined for `docker.io` and not for `docker.io/library/busybox`). + +## Individual Configuration Sections + +A single configuration section is selected for a container image using the process +described above. The configuration section is a YAML mapping, with the following keys: + +- `sigstore-staging` defines a URL of the signature storage, used for editing it (adding or deleting signatures). + + This key is optional; if it is missing, `sigstore` below is used. + +- `sigstore` defines a URL of the signature storage. + This URL is used for reading existing signatures, + and if `sigstore-staging` does not exist, also for adding or removing them. + + This key is optional; if it is missing, no signature storage is defined (no signatures + are downloaded along with images; adding new signatures is possible only if `sigstore-staging` is defined). + +## Examples + +### Using Containers from Various Origins + +The following demonstrates how to consume and run images from various registries and namespaces: + +```yaml +docker: + registry.database-supplier.com: + sigstore: https://sigstore.database-supplier.com + distribution.great-middleware.org: + sigstore: https://security-team.great-middleware.org/sigstore + docker.io/web-framework: + sigstore: https://sigstore.web-framework.io:8080 +``` + +### Developing and Signing Containers, Staging Signatures + +For developers in `example.com`: + +- Consume most container images using the public servers also used by clients. +- Use separate signature storage for container images in a namespace corresponding to the developers' department, with a staging storage used before publishing signatures. +- Craft an individual exception for a single branch a specific developer is working on locally. + +```yaml +docker: + registry.example.com: + sigstore: https://registry-sigstore.example.com + registry.example.com/mydepartment: + sigstore: https://sigstore.mydepartment.example.com + sigstore-staging: file:///mnt/mydepartment/sigstore-staging + registry.example.com/mydepartment/myproject:mybranch: + sigstore: http://localhost:4242/sigstore + sigstore-staging: file:///home/useraccount/webroot/sigstore +``` + +### A Global Default + +If a company publishes its products using a different domain, and a different registry hostname for each of them, it is still possible to use a single signature storage server +without listing each domain individually. This is expected to be needed only rarely, usually only when staging new signatures. + +```yaml +default-docker: + sigstore-staging: file:///mnt/company/common-sigstore-staging +``` + +# AUTHORS + +Miloslav Trmač diff --git a/vendor/github.com/containers/image/docs/containers-signature.5.md b/vendor/github.com/containers/image/docs/containers-signature.5.md new file mode 100644 index 0000000..5b99e7c --- /dev/null +++ b/vendor/github.com/containers/image/docs/containers-signature.5.md @@ -0,0 +1,241 @@ +% container-signature(5) Container signature format +% Miloslav Trmač +% March 2017 + +# Container signature format + +This document describes the format of container signatures, +as implemented by the `github.com/containers/image/signature` package.
+ +Most users should be able to consume these signatures by using the `github.com/containers/image/signature` package +(preferably through the higher-level `signature.PolicyContext` interface) +without having to care about the details of the format described below. +This documentation exists primarily for maintainers of the package +and to allow independent reimplementations. + +## High-level overview + +The signature provides an end-to-end authenticated claim that a container image +has been approved by a specific party (e.g. the creator of the image as their work, +an automated build system as a result of an automated build, +a company IT department approving the image for production) under a specified _identity_ +(e.g. an OS base image / specific application, with a specific version). + +A container signature consists of a cryptographic signature which identifies +and authenticates who signed the image, and carries as a signed payload a JSON document. +The JSON document identifies the image being signed, claims a specific identity of the +image and, if applicable, contains other information about the image. + +The signatures do not modify the container image (the layers, configuration, manifest, …); +e.g. their presence does not change the manifest digest used to identify the image in +docker/distribution servers; rather, the signatures are associated with an immutable image. +An image can have any number of signatures, so signature distribution systems SHOULD support +associating more than one signature with an image. + +## The cryptographic signature + +As distributed, the container signature is a blob which contains a cryptographic signature +in an industry-standard format, carrying a signed JSON payload (i.e. the blob contains both the +JSON document and a signature of the JSON document; it is not a “detached signature” with +independent blobs containing the JSON document and a cryptographic signature). + +Currently the only defined cryptographic signature format is an OpenPGP signature (RFC 4880), +but others may be added in the future. (The blob does not contain metadata identifying the +cryptographic signature format. It is expected that most formats are sufficiently self-describing +that this is not necessary and the configured expected public key provides another indication +of the expected cryptographic signature format. Such metadata may be added in the future for +newly added cryptographic signature formats, if necessary.) + +Consumers of container signatures SHOULD verify the cryptographic signature +against one or more trusted public keys +(e.g. defined in a [policy.json signature verification policy file](policy.json.md)) +before parsing or processing the JSON payload in _any_ way; +in particular, they SHOULD stop processing the container signature +if the cryptographic signature verification fails, without even starting to process the JSON payload. + +(Consumers MAY extract identification of the signing key and other metadata from the cryptographic signature, +and the JSON payload, without verifying the signature, if the purpose is to allow managing the signature blobs, +e.g. to list the authors and image identities of signatures associated with a single container image; +if so, they SHOULD design the output of such processing to minimize the risk of users considering the output trusted +or in any way usable for making policy decisions about the image.)
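+
+As a purely illustrative sketch (not part of this specification), a consumer using this
+package could perform the verify-before-parse step described above roughly as follows;
+the `sigBlob` and `manifestBlob` inputs and the expected reference and key identity are
+assumed to be supplied by the caller:
+
+```go
+package verify
+
+import "github.com/containers/image/signature"
+
+// verifySignature rejects sigBlob unless it is a valid signature, made by
+// expectedKeyIdentity, of manifestBlob claiming expectedRef; only then is the
+// signed JSON payload considered at all.
+func verifySignature(sigBlob, manifestBlob []byte, expectedRef, expectedKeyIdentity string) error {
+	mech, err := signature.NewGPGSigningMechanism()
+	if err != nil {
+		return err
+	}
+	defer mech.Close()
+	// The payload is parsed only after cryptographic verification succeeds.
+	sig, err := signature.VerifyDockerManifestSignature(sigBlob, manifestBlob, expectedRef, mech, expectedKeyIdentity)
+	if err != nil {
+		return err // stop processing; do not touch the JSON payload
+	}
+	_ = sig.DockerManifestDigest // digest claimed by the now-verified payload
+	return nil
+}
+```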
+ +### OpenPGP signature verification + +When verifying a cryptographic signature in the OpenPGP format, +the consumer MUST verify at least the following aspects of the signature +(like the `github.com/containers/image/signature` package does): + +- The blob MUST be a “Signed Message” as defined in RFC 4880 section 11.3. + (e.g. it MUST NOT be an unsigned “Literal Message”, or any other non-signature format). +- The signature MUST have been made by an expected key trusted for the purpose (and the specific container image). +- The signature MUST be correctly formed and pass the cryptographic validation. +- The signature MUST correctly authenticate the included JSON payload + (in particular, the parsing of the JSON payload MUST NOT start before the complete payload has been cryptographically authenticated). +- The signature MUST NOT be expired. + +The consumer SHOULD have tests for its verification code which verify that signatures failing any of the above are rejected. + +## JSON processing and forward compatibility + +The payload of the cryptographic signature is a JSON document (RFC 7159). +Consumers SHOULD parse it very strictly, +refusing any signature which violates the expected format (e.g. missing members, incorrect member types) +or can be interpreted ambiguously (e.g. a duplicated member in a JSON object). + +Any violations of the JSON format or of other requirements in this document MAY be accepted if the JSON document can be recognized +to have been created by a known-incorrect implementation (see [`optional.creator`](#optionalcreator) below) +and if the semantics of the invalid document, as created by such an implementation, is clear. + +The top-level value of the JSON document MUST be a JSON object with exactly two members, `critical` and `optional`, +each a JSON object. + +The `critical` object MUST contain a `type` member identifying the document as a container signature +(as defined [below](#criticaltype)) +and signature consumers MUST reject signatures which do not have this member or in which this member does not have the expected value. + +To ensure forward compatibility (allowing older signature consumers to correctly +accept or reject signatures created at a later date, with possible extensions to this format), +consumers MUST reject the signature if the `critical` object, or _any_ of its subobjects, +contain _any_ member or data value which is unrecognized, unsupported, invalid, or in any other way unexpected. +At a minimum, this includes unrecognized members in a JSON object, or incorrect types of expected members. + +For the same reason, consumers SHOULD accept any members with unrecognized names in the `optional` object, +and MAY accept signatures where the object member is recognized but unsupported, or the value of the member is unsupported. +Consumers still SHOULD reject signatures where a member of an `optional` object is supported but the value is recognized as invalid. + +## JSON data format + +An example of the full format follows, with detailed description below. +To reiterate, consumers of the signature SHOULD perform successful cryptographic verification, +and MUST reject unexpected data in the `critical` object, or in the top-level object, as described above.
+ +```json +{ + "critical": { + "type": "atomic container signature", + "image": { + "docker-manifest-digest": "sha256:817a12c32a39bbe394944ba49de563e085f1d3c5266eb8e9723256bc4448680e" + }, + "identity": { + "docker-reference": "docker.io/library/busybox:latest" + } + }, + "optional": { + "creator": "some software package v1.0.1-35", + "timestamp": 1483228800 + } +} +``` + +### `critical` + +This MUST be a JSON object which contains data critical to correctly evaluating the validity of a signature. + +Consumers MUST reject any signature where the `critical` object contains any unrecognized, unsupported, invalid or in any other way unexpected member or data. + +### `critical.type` + +This MUST be a string with a value exactly equal to `atomic container signature` (three words, including the spaces). + +Signature consumers MUST reject signatures which do not have this member or in which this member does not have exactly the expected value. + +(The consumers MAY support signatures with a different value of the `type` member, if any is defined in the future; +if so, the rest of the JSON document is interpreted according to rules defining that value of `critical.type`, +not by this document.) + +### `critical.image` + +This MUST be a JSON object which identifies the container image this signature applies to. + +Consumers MUST reject any signature where the `critical.image` object contains any unrecognized, unsupported, invalid or in any other way unexpected member or data. + +(Currently only the `docker-manifest-digest` way of identifying a container image is defined; +alternatives to this may be defined in the future, +but existing consumers are required to reject signatures which use formats they do not support.) + +### `critical.image.docker-manifest-digest` + +This MUST be a JSON string, in the `github.com/opencontainers/go-digest.Digest` string format. + +The value of this member MUST match the manifest of the signed container image, as implemented in the docker/distribution manifest addressing system. + +The consumer of the signature SHOULD verify the manifest digest against a fully verified signature before processing the contents of the image manifest in any other way +(e.g. parsing the manifest further or downloading layers of the image). + +Implementation notes: +* A single container image manifest may have several valid manifest digest values, using different algorithms. +* For “signed” [docker/distribution schema 1](https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-1.md) manifests, +the manifest digest applies to the payload of the JSON web signature, not to the raw manifest blob. + +### `critical.identity` + +This MUST be a JSON object which identifies the claimed identity of the image (usually the purpose of the image, or the application, along with a version information), +as asserted by the author of the signature. + +Consumers MUST reject any signature where the `critical.identity` object contains any unrecognized, unsupported, invalid or in any other way unexpected member or data. + +(Currently only the `docker-reference` way of claiming an image identity/purpose is defined; +alternatives to this may be defined in the future, +but existing consumers are required to reject signatures which use formats they do not support.) + +### `critical.identity.docker-reference` + +This MUST be a JSON string, in the `github.com/docker/distribution/reference` string format, +and using the same normalization semantics (where e.g.
`busybox:latest` is equivalent to `docker.io/library/busybox:latest`). +If the normalization semantics allows multiple string representations of the claimed identity with equivalent meaning, +the `critical.identity.docker-reference` member SHOULD use the fully explicit form (including the full host name and namespaces). + +The value of this member MUST match the image identity/purpose expected by the consumer of the image signature and the image +(again, accounting for the `docker/distribution/reference` normalization semantics). + +In the most common case, this means that the `critical.identity.docker-reference` value must be equal to the docker/distribution reference used to refer to or download the image. +However, depending on the specific application, users or system administrators may accept less specific matches +(e.g. ignoring the tag value in the signature when pulling the `:latest` tag or when referencing an image by digest), +or they may require `critical.identity.docker-reference` values with a completely different namespace to the reference used to refer to/download the image +(e.g. requiring a `critical.identity.docker-reference` value which identifies the image as coming from a supplier when fetching it from a company-internal mirror of approved images). +The software performing this verification SHOULD allow the users to define such a policy using the [policy.json signature verification policy file format](policy.json.md). + +The `critical.identity.docker-reference` value SHOULD contain either a tag or digest; +in most cases, it SHOULD use a tag rather than a digest. (See also the default [`matchRepoDigestOrExact` matching semantics in `policy.json`](policy.json.md#signedby).) + +### `optional` + +This MUST be a JSON object. + +Consumers SHOULD accept any members with unrecognized names in the `optional` object, +and MAY accept a signature where the object member is recognized but unsupported, or the value of the member is valid but unsupported. +Consumers still SHOULD reject any signature where a member of an `optional` object is supported but the value is recognized as invalid. + +### `optional.creator` + +If present, this MUST be a JSON string, identifying the name and version of the software which has created the signature. + +The contents of this string is not defined in detail; however each implementation creating container signatures: + +- SHOULD define the contents to unambiguously define the software in practice (e.g. it SHOULD contain the name of the software, not only the version number) +- SHOULD use a build and versioning process which ensures that the contents of this string (e.g. an included version number) + changes whenever the format or semantics of the generated signature changes in any way; + it SHOULD NOT be possible for two implementations which use a different format or semantics to have the same `optional.creator` value +- SHOULD use a format which is reasonably easy to parse in software (perhaps using a regexp), + and which makes it easy enough to recognize a range of versions of a specific implementation + (e.g. the version of the implementation SHOULD NOT be only a git hash, because they don’t have an easily defined ordering; + the string should contain a version number, or at least a date of the commit).
+ +Consumers of container signatures MAY recognize specific values or sets of values of `optional.creator` +(perhaps augmented with `optional.timestamp`), +and MAY change their processing of the signature based on these values +(usually to accommodate violations of this specification in past versions of the signing software which cannot be fixed retroactively), +as long as the semantics of the invalid document, as created by such an implementation, is clear. + +If consumers of signatures do change their behavior based on the `optional.creator` value, +they SHOULD take care that the way they process the signatures is not inconsistent with +strictly validating signature consumers. +(I.e. it is acceptable for a consumer to accept a signature based on a specific `optional.creator` value +if other implementations would completely reject the signature, +but it would be very undesirable for the two kinds of implementations to accept the signature in different +and inconsistent situations.) + +### `optional.timestamp` + +If present, this MUST be a JSON number, which is representable as a 64-bit integer, and identifies the time when the signature was created +as the number of seconds since the UNIX epoch (Jan 1 1970 00:00 UTC). diff --git a/vendor/github.com/containers/image/docs/containers-transports.5.md b/vendor/github.com/containers/image/docs/containers-transports.5.md new file mode 100644 index 0000000..e9d3b9c --- /dev/null +++ b/vendor/github.com/containers/image/docs/containers-transports.5.md @@ -0,0 +1,109 @@ +% CONTAINERS-TRANSPORTS(5) Containers Transports Man Page +% Valentin Rothberg +% April 2019 + +## NAME + +containers-transports - description of supported transports for copying and storing container images + +## DESCRIPTION + +Tools which use the containers/image library, including skopeo(1), buildah(1), and podman(1), all share a common syntax for referring to container images in various locations. +The general form of the syntax is _transport:details_, where the details depend on the specified transport and are documented below. + +### **containers-storage:** [storage-specifier]{image-id|docker-reference[@image-id]} + +An image located in a local containers storage. +The format of _docker-reference_ is described in detail in the **docker** transport. + +The _storage-specifier_ allows for referencing storage locations on the file system and has the format `[[driver@]root[+run-root][:options]]` where the optional `driver` refers to the storage driver (e.g., overlay or btrfs) and where `root` is an absolute path to the storage's root directory. +The optional `run-root` can be used to specify the run directory of the storage where all temporary writable content is stored. +The optional `options` are a comma-separated list of driver-specific options. +Please refer to containers-storage.conf(5) for further information on the drivers and supported options. + +### **dir:**_path_ + +An existing local directory _path_ storing the manifest, layer tarballs and signatures as individual files. +This is a non-standardized format, primarily useful for debugging or noninvasive container inspection. + +### **docker://**_docker-reference_ + +An image in a registry implementing the "Docker Registry HTTP API V2". +By default, uses the authorization state in `$XDG_RUNTIME_DIR/containers/auth.json`, which is set using podman-login(1). +If the authorization state is not found there, `$HOME/.docker/config.json` is checked, which is set using docker-login(1).
+The containers-registries.conf(5) file further allows for configuring various settings of a registry. + +Note that a _docker-reference_ has the following format: `name[:tag|@digest]`. +While the docker transport does not support both a tag and a digest at the same time, some formats like containers-storage do. +Digests can also be used in an image destination as long as the manifest matches the provided digest. +The digest of images can be explored with skopeo-inspect(1). +If `name` does not contain a slash, it is treated as `docker.io/library/name`. +Otherwise, the component before the first slash is checked to see whether it is recognized as a `hostname[:port]` (i.e., it contains either a . or a :, or the component is exactly localhost). +If the first component of name is not recognized as a `hostname[:port]`, `name` is treated as `docker.io/name`. + +### **docker-archive:**_path[:docker-reference]_ + +An image stored in a file in the docker-save(1) format. +_docker-reference_ is only used when creating such a file, and it must not contain a digest. +It is further possible to copy data to stdin by specifying `docker-archive:/dev/stdin` but note that the file used must be seekable. + +### **docker-daemon:**_docker-reference|algo:digest_ + +An image stored in the docker daemon's internal storage. +The image must be specified as a _docker-reference_ or in an alternative _algo:digest_ format when being used as an image source. +The _algo:digest_ refers to the image ID reported by docker-inspect(1). + +### **oci:**_path[:tag]_ + +An image compliant with the "Open Container Image Layout Specification" at _path_. +Using a _tag_ is optional and allows for storing multiple images at the same _path_. + +### **oci-archive:**_path[:tag]_ + +An image compliant with the "Open Container Image Layout Specification" stored as a tar(1) archive at _path_. + +### **ostree:**_docker-reference[@/absolute/repo/path]_ + +An image in the local ostree(1) repository. +_/absolute/repo/path_ defaults to _/ostree/repo_. + +## Examples + +The following examples demonstrate how some of the containers transports can be used. +The examples use skopeo-copy(1) for copying container images; a minimal library-level sketch in Go comes first.
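+
+As a purely illustrative sketch (the image names below are arbitrary), programs using the
+library directly can resolve transport strings in the syntax described above with
+`alltransports.ParseImageName`:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/containers/image/transports/alltransports"
+)
+
+func main() {
+	for _, name := range []string{
+		"docker://docker.io/library/alpine:latest",
+		"oci:alpine-oci:latest",
+		"dir:/tmp/alpine",
+	} {
+		// ParseImageName expects the transport:details syntax described above.
+		ref, err := alltransports.ParseImageName(name)
+		if err != nil {
+			fmt.Printf("%s: %v\n", name, err)
+			continue
+		}
+		fmt.Printf("transport %s, reference %s\n", ref.Transport().Name(), ref.StringWithinTransport())
+	}
+}
+```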
+ +**Copying an image from one registry to another**: +``` +$ skopeo copy docker://docker.io/library/alpine:latest docker://localhost:5000/alpine:latest +``` + +**Copying an image from a running Docker daemon to a directory in the OCI layout**: +``` +$ mkdir alpine-oci +$ skopeo copy docker-daemon:alpine:latest oci:alpine-oci +$ tree alpine-oci +alpine-oci/ +├── blobs +│   └── sha256 +│   ├── 83ef92b73cf4595aa7fe214ec6747228283d585f373d8f6bc08d66bebab531b7 +│   ├── 9a6259e911dcd0a53535a25a9760ad8f2eded3528e0ad5604c4488624795cecc +│   └── ff8df268d29ccbe81cdf0a173076dcfbbea4bb2b6df1dd26766a73cb7b4ae6f7 +├── index.json +└── oci-layout + +2 directories, 5 files +``` + +**Copying an image from a registry to the local storage**: +``` +$ skopeo copy docker://docker.io/library/alpine:latest containers-storage:alpine:latest +``` + +## SEE ALSO + +docker-login(1), docker-save(1), ostree(1), podman-login(1), skopeo-copy(1), skopeo-inspect(1), tar(1), containers-registries.conf(5), containers-storage.conf(5) + +## AUTHORS + +Miloslav Trmač +Valentin Rothberg diff --git a/vendor/github.com/containers/image/docs/signature-protocols.md b/vendor/github.com/containers/image/docs/signature-protocols.md new file mode 100644 index 0000000..ade2322 --- /dev/null +++ b/vendor/github.com/containers/image/docs/signature-protocols.md @@ -0,0 +1,136 @@ +# Signature access protocols + +The `github.com/containers/image` library supports signatures implemented as blobs “attached to” an image. +Some image transports (local storage formats and remote protocols) implement these signatures natively +or trivially; for others, the protocol extensions described below are necessary. + +## docker/distribution registries—separate storage + +### Usage + +Any existing docker/distribution registry, whether or not it natively supports signatures, +can be augmented with separate signature storage by configuring a signature storage URL in [`registries.d`](registries.d.md). +`registries.d` can be configured to use one storage URL for a whole docker/distribution server, +or also separate URLs for smaller namespaces or individual repositories within the server +(which e.g. allows image authors to manage their own signature storage while publishing +the images on the public `docker.io` server). + +The signature storage URL defines a root of a path hierarchy. +It can be either a `file:///…` URL, pointing to a local directory structure, +or an `http`/`https` URL, pointing to a remote server. +`file:///` signature storage can be both read and written; `http`/`https` only supports reading. + +The same path hierarchy is used in both cases, so the HTTP/HTTPS server can be +a simple static web server serving a directory structure created by writing to a `file:///` signature storage. +(This of course does not prevent other server implementations, +e.g. an HTTP server reading signatures from a database.) + +The usual workflow for producing and distributing images using the separate storage mechanism +is to configure the repository in `registries.d` with a `sigstore-staging` URL pointing to a private +`file:///` staging area, and a `sigstore` URL pointing to a public web server. +To publish an image, the image author would sign the image as necessary (e.g. using `skopeo copy`), +and then copy the created directory structure from the `file:///` staging area +to a subdirectory of a webroot of the public web server so that they are accessible using the public `sigstore` URL.
+The author would also instruct consumers of the image to, or provide a `registries.d` configuration file to, +set up a `sigstore` URL pointing to the public web server. + +### Path structure + +Given a _base_ signature storage URL configured in `registries.d` as mentioned above, +and a container image stored in a docker/distribution registry using the _fully-expanded_ name +_hostname_`/`_namespaces_`/`_name_{`@`_digest_,`:`_tag_} (e.g. for `docker.io/library/busybox:latest`, +_namespaces_ is `library`, even if the user refers to the image using the shorter syntax as `busybox:latest`), +signatures are accessed using URLs of the form +> _base_`/`_namespaces_`/`_name_`@`_digest-algo_`=`_digest-value_`/signature-`_index_ + +where _digest-algo_`:`_digest-value_ is a manifest digest usable for referencing the relevant image manifest +(i.e. even if the user referenced the image using a tag, +the signature storage is always disambiguated using digest references). +Note that in the URLs used for signatures, +_digest-algo_ and _digest-value_ are separated using the `=` character, +not `:` like when accessing the manifest using the docker/distribution API. + +Within the URL, _index_ is a decimal integer (in the canonical form), starting with 1. +Signatures are stored at URLs with successive _index_ values; to read all of them, start with _index_=1, +and continue reading signatures and increasing _index_ as long as signatures with these _index_ values exist. +Similarly, to add one more signature to an image, find the first _index_ which does not exist, and +then store the new signature using that _index_ value. + +There is no way to list existing signatures other than iterating through the successive _index_ values, +and no way to download all of the signatures at once. + +### Examples + +For a docker/distribution image available as `busybox@sha256:817a12c32a39bbe394944ba49de563e085f1d3c5266eb8e9723256bc4448680e` +(or as `busybox:latest` if the `latest` tag points to a manifest with the same digest), +and with a `registries.d` configuration specifying a `sigstore` URL `https://example.com/sigstore` for the same image, +the following URLs would be accessed to download all signatures: +> - `https://example.com/sigstore/library/busybox@sha256=817a12c32a39bbe394944ba49de563e085f1d3c5266eb8e9723256bc4448680e/signature-1` +> - `https://example.com/sigstore/library/busybox@sha256=817a12c32a39bbe394944ba49de563e085f1d3c5266eb8e9723256bc4448680e/signature-2` +> - … + +For a docker/distribution image available as `example.com/ns1/ns2/ns3/repo@somedigest:digestvalue` and the same +`sigstore` URL, the signatures would be available at +> `https://example.com/sigstore/ns1/ns2/ns3/repo@somedigest=digestvalue/signature-1` + +and so on. + +## (OpenShift) docker/distribution API extension + +As of https://github.com/openshift/origin/pull/12504/ , the OpenShift-embedded registry also provides +an extension of the docker/distribution API which allows simpler access to the signatures, +using only the docker/distribution API endpoint. + +This API is not inherently OpenShift-specific (e.g. the client does not need to know the OpenShift API endpoint, +and credentials sufficient to access the docker/distribution API server are sufficient to access signatures as well), +and it is the preferred way to implement signature storage in registries. + +See https://github.com/openshift/openshift-docs/pull/3556 for the upstream documentation of the API.
+ +To read the signature, any user with access to an image can use the `/extensions/v2/…/signatures/…` +path to read an array of signatures. Use only the signature objects +which have `version` equal to `2`, `type` equal to `atomic`, and read the signature from `content`; +ignore the other fields of the signature object. + +To add a single signature, `PUT` a new object with `version` set to `2`, `type` set to `atomic`, +and `content` set to the signature. Also set `name` to a unique name with the form +_digest_`@`_per-image-name_, where _digest_ is an image manifest digest (also used in the URL), +and _per-image-name_ is any unique identifier. + +To add more than one signature, add them one at a time. This API does not allow deleting signatures. + +Note that because signatures are stored within the cluster-wide image objects, +i.e. different namespaces cannot associate different sets of signatures to the same image, +updating signatures requires cluster-wide access to the `imagesignatures` resource +(by default available to the `system:image-signer` role). + +## OpenShift-embedded registries + +The OpenShift-embedded registry implements the ordinary docker/distribution API, +and it also exposes images through the OpenShift REST API (available through the “API master” servers). + +Note: OpenShift versions 1.5 and later support the above-described [docker/distribution API extension](#openshift-dockerdistribution-api-extension), +which is easier to set up and should usually be preferred. +Continue reading for details on using older versions of OpenShift. + +As of https://github.com/openshift/origin/pull/9181, +signatures are exposed through the OpenShift API +(i.e. to access the complete image, it is necessary to use both APIs, +in particular to know the URLs for both the docker/distribution and the OpenShift API master endpoints). + +To read the signature, any user with access to an image can use the `imagestreamimages` namespaced +resource to read an `Image` object and its `Signatures` array. Use only the `ImageSignature` objects +which have `Type` equal to `atomic`, and read the signature from `Content`; ignore the other fields of +the `ImageSignature` object. + +To add or remove signatures, use the cluster-wide (non-namespaced) `imagesignatures` resource, +with `Type` set to `atomic` and `Content` set to the signature. Signature names must have the form +_digest_`@`_per-image-name_, where _digest_ is an image manifest digest (OpenShift “image name”), +and _per-image-name_ is any unique identifier. + +Note that because signatures are stored within the cluster-wide image objects, +i.e. different namespaces cannot associate different sets of signatures to the same image, +updating signatures requires cluster-wide access to the `imagesignatures` resource +(by default available to the `system:image-signer` role), +and deleting signatures is strongly discouraged +(it deletes the signature from all namespaces which contain the same image).
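+
+As an illustration of the separate-storage path structure described earlier in this document,
+a hypothetical helper (not an API of this library) could compute signature URLs as follows:
+
+```go
+package main
+
+import (
+	"fmt"
+	"strings"
+)
+
+// sigstoreURL returns the URL of the index-th signature (indices start at 1)
+// for the image repoPath@manifestDigest under the given base storage URL.
+func sigstoreURL(base, repoPath, manifestDigest string, index int) string {
+	// Signature URLs separate digest-algo and digest-value with "=",
+	// not the ":" used when accessing the manifest itself.
+	return fmt.Sprintf("%s/%s@%s/signature-%d",
+		base, repoPath, strings.Replace(manifestDigest, ":", "=", 1), index)
+}
+
+func main() {
+	fmt.Println(sigstoreURL("https://example.com/sigstore", "library/busybox",
+		"sha256:817a12c32a39bbe394944ba49de563e085f1d3c5266eb8e9723256bc4448680e", 1))
+}
+```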
diff --git a/vendor/github.com/containers/image/image/docker_list.go b/vendor/github.com/containers/image/image/docker_list.go new file mode 100644 index 0000000..1f0faa1 --- /dev/null +++ b/vendor/github.com/containers/image/image/docker_list.go @@ -0,0 +1,94 @@ +package image + +import ( + "context" + "encoding/json" + "fmt" + "runtime" + + "github.com/containers/image/manifest" + "github.com/containers/image/types" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +type platformSpec struct { + Architecture string `json:"architecture"` + OS string `json:"os"` + OSVersion string `json:"os.version,omitempty"` + OSFeatures []string `json:"os.features,omitempty"` + Variant string `json:"variant,omitempty"` + Features []string `json:"features,omitempty"` // removed in OCI +} + +// A manifestDescriptor references a platform-specific manifest. +type manifestDescriptor struct { + manifest.Schema2Descriptor + Platform platformSpec `json:"platform"` +} + +type manifestList struct { + SchemaVersion int `json:"schemaVersion"` + MediaType string `json:"mediaType"` + Manifests []manifestDescriptor `json:"manifests"` +} + +// chooseDigestFromManifestList parses blob as a schema2 manifest list, +// and returns the digest of the image appropriate for the current environment. +func chooseDigestFromManifestList(sys *types.SystemContext, blob []byte) (digest.Digest, error) { + wantedArch := runtime.GOARCH + if sys != nil && sys.ArchitectureChoice != "" { + wantedArch = sys.ArchitectureChoice + } + wantedOS := runtime.GOOS + if sys != nil && sys.OSChoice != "" { + wantedOS = sys.OSChoice + } + + list := manifestList{} + if err := json.Unmarshal(blob, &list); err != nil { + return "", err + } + for _, d := range list.Manifests { + if d.Platform.Architecture == wantedArch && d.Platform.OS == wantedOS { + return d.Digest, nil + } + } + return "", fmt.Errorf("no image found in manifest list for architecture %s, OS %s", wantedArch, wantedOS) +} + +func manifestSchema2FromManifestList(ctx context.Context, sys *types.SystemContext, src types.ImageSource, manblob []byte) (genericManifest, error) { + targetManifestDigest, err := chooseDigestFromManifestList(sys, manblob) + if err != nil { + return nil, err + } + manblob, mt, err := src.GetManifest(ctx, &targetManifestDigest) + if err != nil { + return nil, err + } + + matches, err := manifest.MatchesDigest(manblob, targetManifestDigest) + if err != nil { + return nil, errors.Wrap(err, "Error computing manifest digest") + } + if !matches { + return nil, errors.Errorf("Manifest image does not match selected manifest digest %s", targetManifestDigest) + } + + return manifestInstanceFromBlob(ctx, sys, src, manblob, mt) +} + +// ChooseManifestInstanceFromManifestList returns a digest of a manifest appropriate +// for the current system from the manifest available from src. +func ChooseManifestInstanceFromManifestList(ctx context.Context, sys *types.SystemContext, src types.UnparsedImage) (digest.Digest, error) { + // For now this only handles manifest.DockerV2ListMediaType; we can generalize it later, + // probably along with manifest list editing. 
+ blob, mt, err := src.Manifest(ctx) + if err != nil { + return "", err + } + if mt != manifest.DockerV2ListMediaType { + return "", fmt.Errorf("Internal error: Trying to select an image from a non-manifest-list manifest type %s", mt) + } + return chooseDigestFromManifestList(sys, blob) +} diff --git a/vendor/github.com/containers/image/image/docker_schema1.go b/vendor/github.com/containers/image/image/docker_schema1.go new file mode 100644 index 0000000..28cec7d --- /dev/null +++ b/vendor/github.com/containers/image/image/docker_schema1.go @@ -0,0 +1,202 @@ +package image + +import ( + "context" + + "github.com/containers/image/docker/reference" + "github.com/containers/image/manifest" + "github.com/containers/image/types" + "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +type manifestSchema1 struct { + m *manifest.Schema1 +} + +func manifestSchema1FromManifest(manifestBlob []byte) (genericManifest, error) { + m, err := manifest.Schema1FromManifest(manifestBlob) + if err != nil { + return nil, err + } + return &manifestSchema1{m: m}, nil +} + +// manifestSchema1FromComponents builds a new manifestSchema1 from the supplied data. +func manifestSchema1FromComponents(ref reference.Named, fsLayers []manifest.Schema1FSLayers, history []manifest.Schema1History, architecture string) (genericManifest, error) { + m, err := manifest.Schema1FromComponents(ref, fsLayers, history, architecture) + if err != nil { + return nil, err + } + return &manifestSchema1{m: m}, nil +} + +func (m *manifestSchema1) serialize() ([]byte, error) { + return m.m.Serialize() +} + +func (m *manifestSchema1) manifestMIMEType() string { + return manifest.DockerV2Schema1SignedMediaType +} + +// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. +// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below. +func (m *manifestSchema1) ConfigInfo() types.BlobInfo { + return m.m.ConfigInfo() +} + +// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise. +// The result is cached; it is OK to call this however often you need. +func (m *manifestSchema1) ConfigBlob(context.Context) ([]byte, error) { + return nil, nil +} + +// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about +// layers in the resulting configuration isn't guaranteed to be returned due to how +// old image manifests work (docker v2s1 especially). +func (m *manifestSchema1) OCIConfig(ctx context.Context) (*imgspecv1.Image, error) { + v2s2, err := m.convertToManifestSchema2(nil, nil) + if err != nil { + return nil, err + } + return v2s2.OCIConfig(ctx) +} + +// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). +// The Digest field is guaranteed to be provided; Size may be -1. +// WARNING: The list may contain duplicates, and they are semantically relevant. +func (m *manifestSchema1) LayerInfos() []types.BlobInfo { + return manifestLayerInfosToBlobInfos(m.m.LayerInfos()) +} + +// EmbeddedDockerReferenceConflicts reports whether a Docker reference embedded in the manifest, if any, conflicts with destination ref. +// It returns false if the manifest does not embed a Docker reference.
+// (This embedding unfortunately happens for Docker schema1; please do not add support for this in any new formats.) +func (m *manifestSchema1) EmbeddedDockerReferenceConflicts(ref reference.Named) bool { + // This is a bit convoluted: We can’t just have a "get embedded docker reference" method + // and have the “does it conflict” logic in the generic copy code, because the manifest does not actually + // embed a full docker/distribution reference, but only the repo name and tag (without the host name). + // So we would have to provide a “return repo without host name, and tag” getter for the generic code, + // which would be very awkward. Instead, we do the matching here in schema1-specific code, and all the + // generic copy code needs to know about is reference.Named and that a manifest may need updating + // for some destinations. + name := reference.Path(ref) + var tag string + if tagged, isTagged := ref.(reference.NamedTagged); isTagged { + tag = tagged.Tag() + } else { + tag = "" + } + return m.m.Name != name || m.m.Tag != tag +} + +// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. +func (m *manifestSchema1) Inspect(context.Context) (*types.ImageInspectInfo, error) { + return m.m.Inspect(nil) +} + +// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs. +// This is a horribly specific interface, but InformationOnly.LayerDiffIDs can be very expensive to compute +// (most importantly it forces us to download the full layers even if they are already present at the destination). +func (m *manifestSchema1) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool { + return (options.ManifestMIMEType == manifest.DockerV2Schema2MediaType || options.ManifestMIMEType == imgspecv1.MediaTypeImageManifest) +} + +// UpdatedImage returns a types.Image modified according to options. +// This does not change the state of the original Image object. +func (m *manifestSchema1) UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) { + copy := manifestSchema1{m: manifest.Schema1Clone(m.m)} + if options.LayerInfos != nil { + if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil { + return nil, err + } + } + if options.EmbeddedDockerReference != nil { + copy.m.Name = reference.Path(options.EmbeddedDockerReference) + if tagged, isTagged := options.EmbeddedDockerReference.(reference.NamedTagged); isTagged { + copy.m.Tag = tagged.Tag() + } else { + copy.m.Tag = "" + } + } + + switch options.ManifestMIMEType { + case "": // No conversion, OK + case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType: + // We have 2 MIME types for schema 1, which are basically equivalent (even the un-"Signed" MIME type will be rejected if there isn’t a signature); so, + // handle conversions between them by doing nothing.
+	case manifest.DockerV2Schema2MediaType:
+		m2, err := copy.convertToManifestSchema2(options.InformationOnly.LayerInfos, options.InformationOnly.LayerDiffIDs)
+		if err != nil {
+			return nil, err
+		}
+		return memoryImageFromManifest(m2), nil
+	case imgspecv1.MediaTypeImageManifest:
+		// We can't directly convert to OCI, but we can transitively convert via a Docker V2.2 Distribution manifest
+		m2, err := copy.convertToManifestSchema2(options.InformationOnly.LayerInfos, options.InformationOnly.LayerDiffIDs)
+		if err != nil {
+			return nil, err
+		}
+		return m2.UpdatedImage(ctx, types.ManifestUpdateOptions{
+			ManifestMIMEType: imgspecv1.MediaTypeImageManifest,
+			InformationOnly:  options.InformationOnly,
+		})
+	default:
+		return nil, errors.Errorf("Conversion of image manifest from %s to %s is not implemented", manifest.DockerV2Schema1SignedMediaType, options.ManifestMIMEType)
+	}
+
+	return memoryImageFromManifest(&copy), nil
+}
+
+// Based on github.com/docker/docker/distribution/pull_v2.go
+func (m *manifestSchema1) convertToManifestSchema2(uploadedLayerInfos []types.BlobInfo, layerDiffIDs []digest.Digest) (genericManifest, error) {
+	if len(m.m.ExtractedV1Compatibility) == 0 {
+		// What would this even mean?! Anyhow, the rest of the code depends on FSLayers[0] and ExtractedV1Compatibility[0] existing.
+		return nil, errors.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema2MediaType)
+	}
+	if len(m.m.ExtractedV1Compatibility) != len(m.m.FSLayers) {
+		return nil, errors.Errorf("Inconsistent schema 1 manifest: %d history entries, %d fsLayers entries", len(m.m.ExtractedV1Compatibility), len(m.m.FSLayers))
+	}
+	if uploadedLayerInfos != nil && len(uploadedLayerInfos) != len(m.m.FSLayers) {
+		return nil, errors.Errorf("Internal error: uploaded %d blobs, but schema1 manifest has %d fsLayers", len(uploadedLayerInfos), len(m.m.FSLayers))
+	}
+	if layerDiffIDs != nil && len(layerDiffIDs) != len(m.m.FSLayers) {
+		return nil, errors.Errorf("Internal error: collected %d DiffID values, but schema1 manifest has %d fsLayers", len(layerDiffIDs), len(m.m.FSLayers))
+	}
+
+	// Build a list of the diffIDs for the non-empty layers.
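+	// Schema1 stores fsLayers/history newest-first while schema2 lists layers oldest-first,
+	// so the loop below walks backwards: v2Index = (len-1) - v1Index
+	// (e.g. with 3 entries, v1Index 2,1,0 corresponds to v2Index 0,1,2).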
+ diffIDs := []digest.Digest{} + var layers []manifest.Schema2Descriptor + for v1Index := len(m.m.ExtractedV1Compatibility) - 1; v1Index >= 0; v1Index-- { + v2Index := (len(m.m.ExtractedV1Compatibility) - 1) - v1Index + + if !m.m.ExtractedV1Compatibility[v1Index].ThrowAway { + var size int64 + if uploadedLayerInfos != nil { + size = uploadedLayerInfos[v2Index].Size + } + var d digest.Digest + if layerDiffIDs != nil { + d = layerDiffIDs[v2Index] + } + layers = append(layers, manifest.Schema2Descriptor{ + MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", + Size: size, + Digest: m.m.FSLayers[v1Index].BlobSum, + }) + diffIDs = append(diffIDs, d) + } + } + configJSON, err := m.m.ToSchema2Config(diffIDs) + if err != nil { + return nil, err + } + configDescriptor := manifest.Schema2Descriptor{ + MediaType: "application/vnd.docker.container.image.v1+json", + Size: int64(len(configJSON)), + Digest: digest.FromBytes(configJSON), + } + + return manifestSchema2FromComponents(configDescriptor, nil, configJSON, layers), nil +} diff --git a/vendor/github.com/containers/image/image/docker_schema2.go b/vendor/github.com/containers/image/image/docker_schema2.go new file mode 100644 index 0000000..351e73e --- /dev/null +++ b/vendor/github.com/containers/image/image/docker_schema2.go @@ -0,0 +1,351 @@ +package image + +import ( + "bytes" + "context" + "crypto/sha256" + "encoding/hex" + "encoding/json" + "io/ioutil" + "strings" + + "github.com/containers/image/docker/reference" + "github.com/containers/image/manifest" + "github.com/containers/image/pkg/blobinfocache/none" + "github.com/containers/image/types" + "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// GzippedEmptyLayer is a gzip-compressed version of an empty tar file (1024 NULL bytes) +// This comes from github.com/docker/distribution/manifest/schema1/config_builder.go; there is +// a non-zero embedded timestamp; we could zero that, but that would just waste storage space +// in registries, so let’s use the same values. +var GzippedEmptyLayer = []byte{ + 31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 98, 24, 5, 163, 96, 20, 140, 88, + 0, 8, 0, 0, 255, 255, 46, 175, 181, 239, 0, 4, 0, 0, +} + +// GzippedEmptyLayerDigest is a digest of GzippedEmptyLayer +const GzippedEmptyLayerDigest = digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4") + +type manifestSchema2 struct { + src types.ImageSource // May be nil if configBlob is not nil + configBlob []byte // If set, corresponds to contents of ConfigDescriptor. 
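+	// (configBlob is filled lazily from src by ConfigBlob() below when nil.)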
+	m          *manifest.Schema2
+}
+
+func manifestSchema2FromManifest(src types.ImageSource, manifestBlob []byte) (genericManifest, error) {
+	m, err := manifest.Schema2FromManifest(manifestBlob)
+	if err != nil {
+		return nil, err
+	}
+	return &manifestSchema2{
+		src: src,
+		m:   m,
+	}, nil
+}
+
+// manifestSchema2FromComponents builds a new manifestSchema2 from the supplied data:
+func manifestSchema2FromComponents(config manifest.Schema2Descriptor, src types.ImageSource, configBlob []byte, layers []manifest.Schema2Descriptor) genericManifest {
+	return &manifestSchema2{
+		src:        src,
+		configBlob: configBlob,
+		m:          manifest.Schema2FromComponents(config, layers),
+	}
+}
+
+func (m *manifestSchema2) serialize() ([]byte, error) {
+	return m.m.Serialize()
+}
+
+func (m *manifestSchema2) manifestMIMEType() string {
+	return m.m.MediaType
+}
+
+// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
+// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below.
+func (m *manifestSchema2) ConfigInfo() types.BlobInfo {
+	return m.m.ConfigInfo()
+}
+
+// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about
+// layers in the resulting configuration isn't guaranteed to be returned due to how
+// old image manifests work (docker v2s1 especially).
+func (m *manifestSchema2) OCIConfig(ctx context.Context) (*imgspecv1.Image, error) {
+	configBlob, err := m.ConfigBlob(ctx)
+	if err != nil {
+		return nil, err
+	}
+	// docker v2s2 and OCI v1 are mostly compatible but v2s2 contains more fields
+	// than OCI v1. This unmarshal makes sure we drop docker v2s2
+	// fields that aren't needed in OCI v1.
+	configOCI := &imgspecv1.Image{}
+	if err := json.Unmarshal(configBlob, configOCI); err != nil {
+		return nil, err
+	}
+	return configOCI, nil
+}
+
+// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise.
+// The result is cached; it is OK to call this however often you need.
+func (m *manifestSchema2) ConfigBlob(ctx context.Context) ([]byte, error) {
+	if m.configBlob == nil {
+		if m.src == nil {
+			return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestSchema2")
+		}
+		stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromSchema2Descriptor(m.m.ConfigDescriptor), none.NoCache)
+		if err != nil {
+			return nil, err
+		}
+		defer stream.Close()
+		blob, err := ioutil.ReadAll(stream)
+		if err != nil {
+			return nil, err
+		}
+		computedDigest := digest.FromBytes(blob)
+		if computedDigest != m.m.ConfigDescriptor.Digest {
+			return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.m.ConfigDescriptor.Digest)
+		}
+		m.configBlob = blob
+	}
+	return m.configBlob, nil
+}
+
+// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (m *manifestSchema2) LayerInfos() []types.BlobInfo {
+	return manifestLayerInfosToBlobInfos(m.m.LayerInfos())
+}
+
+// EmbeddedDockerReferenceConflicts returns whether a Docker reference embedded in the manifest, if any, conflicts with destination ref.
+// It returns false if the manifest does not embed a Docker reference.
+// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.)
+func (m *manifestSchema2) EmbeddedDockerReferenceConflicts(ref reference.Named) bool {
+	return false
+}
+
+// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
+func (m *manifestSchema2) Inspect(ctx context.Context) (*types.ImageInspectInfo, error) {
+	getter := func(info types.BlobInfo) ([]byte, error) {
+		if info.Digest != m.ConfigInfo().Digest {
+			// Shouldn't ever happen
+			return nil, errors.New("asked for a different config blob")
+		}
+		config, err := m.ConfigBlob(ctx)
+		if err != nil {
+			return nil, err
+		}
+		return config, nil
+	}
+	return m.m.Inspect(getter)
+}
+
+// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs.
+// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive
+// (most importantly it forces us to download the full layers even if they are already present at the destination).
+func (m *manifestSchema2) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool {
+	return false
+}
+
+// UpdatedImage returns a types.Image modified according to options.
+// This does not change the state of the original Image object.
+func (m *manifestSchema2) UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) {
+	copy := manifestSchema2{ // NOTE: This is not a deep copy, it still shares slices etc.
+		src:        m.src,
+		configBlob: m.configBlob,
+		m:          manifest.Schema2Clone(m.m),
+	}
+	if options.LayerInfos != nil {
+		if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil {
+			return nil, err
+		}
+	}
+	// Ignore options.EmbeddedDockerReference: it may be set when converting from schema1 to schema2, but we really don't care.
+
+	switch options.ManifestMIMEType {
+	case "": // No conversion, OK
+	case manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema1MediaType:
+		return copy.convertToManifestSchema1(ctx, options.InformationOnly.Destination)
+	case imgspecv1.MediaTypeImageManifest:
+		return copy.convertToManifestOCI1(ctx)
+	default:
+		return nil, errors.Errorf("Conversion of image manifest from %s to %s is not implemented", manifest.DockerV2Schema2MediaType, options.ManifestMIMEType)
+	}
+
+	return memoryImageFromManifest(&copy), nil
+}
+
+func oci1DescriptorFromSchema2Descriptor(d manifest.Schema2Descriptor) imgspecv1.Descriptor {
+	return imgspecv1.Descriptor{
+		MediaType: d.MediaType,
+		Size:      d.Size,
+		Digest:    d.Digest,
+		URLs:      d.URLs,
+	}
+}
+
+func (m *manifestSchema2) convertToManifestOCI1(ctx context.Context) (types.Image, error) {
+	configOCI, err := m.OCIConfig(ctx)
+	if err != nil {
+		return nil, err
+	}
+	configOCIBytes, err := json.Marshal(configOCI)
+	if err != nil {
+		return nil, err
+	}
+
+	config := imgspecv1.Descriptor{
+		MediaType: imgspecv1.MediaTypeImageConfig,
+		Size:      int64(len(configOCIBytes)),
+		Digest:    digest.FromBytes(configOCIBytes),
+	}
+
+	layers := make([]imgspecv1.Descriptor, len(m.m.LayersDescriptors))
+	for idx := range layers {
+		layers[idx] = oci1DescriptorFromSchema2Descriptor(m.m.LayersDescriptors[idx])
+		if m.m.LayersDescriptors[idx].MediaType == manifest.DockerV2Schema2ForeignLayerMediaType {
+			layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributable
+		} else {
+			// we assume layers are gzip'ed because docker v2s2 only deals with
+			// gzip'ed layers. However, OCI has non-gzip'ed layers as well.
+ layers[idx].MediaType = imgspecv1.MediaTypeImageLayerGzip + } + } + + m1 := manifestOCI1FromComponents(config, m.src, configOCIBytes, layers) + return memoryImageFromManifest(m1), nil +} + +// Based on docker/distribution/manifest/schema1/config_builder.go +func (m *manifestSchema2) convertToManifestSchema1(ctx context.Context, dest types.ImageDestination) (types.Image, error) { + configBytes, err := m.ConfigBlob(ctx) + if err != nil { + return nil, err + } + imageConfig := &manifest.Schema2Image{} + if err := json.Unmarshal(configBytes, imageConfig); err != nil { + return nil, err + } + + // Build fsLayers and History, discarding all configs. We will patch the top-level config in later. + fsLayers := make([]manifest.Schema1FSLayers, len(imageConfig.History)) + history := make([]manifest.Schema1History, len(imageConfig.History)) + nonemptyLayerIndex := 0 + var parentV1ID string // Set in the loop + v1ID := "" + haveGzippedEmptyLayer := false + if len(imageConfig.History) == 0 { + // What would this even mean?! Anyhow, the rest of the code depends on fsLayers[0] and history[0] existing. + return nil, errors.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema1SignedMediaType) + } + for v2Index, historyEntry := range imageConfig.History { + parentV1ID = v1ID + v1Index := len(imageConfig.History) - 1 - v2Index + + var blobDigest digest.Digest + if historyEntry.EmptyLayer { + if !haveGzippedEmptyLayer { + logrus.Debugf("Uploading empty layer during conversion to schema 1") + // Ideally we should update the relevant BlobInfoCache about this layer, but that would require passing it down here, + // and anyway this blob is so small that it’s easier to just copy it than to worry about figuring out another location where to get it. + info, err := dest.PutBlob(ctx, bytes.NewReader(GzippedEmptyLayer), types.BlobInfo{Digest: GzippedEmptyLayerDigest, Size: int64(len(GzippedEmptyLayer))}, none.NoCache, false) + if err != nil { + return nil, errors.Wrap(err, "Error uploading empty layer") + } + if info.Digest != GzippedEmptyLayerDigest { + return nil, errors.Errorf("Internal error: Uploaded empty layer has digest %#v instead of %s", info.Digest, GzippedEmptyLayerDigest) + } + haveGzippedEmptyLayer = true + } + blobDigest = GzippedEmptyLayerDigest + } else { + if nonemptyLayerIndex >= len(m.m.LayersDescriptors) { + return nil, errors.Errorf("Invalid image configuration, needs more than the %d distributed layers", len(m.m.LayersDescriptors)) + } + blobDigest = m.m.LayersDescriptors[nonemptyLayerIndex].Digest + nonemptyLayerIndex++ + } + + // AFAICT pull ignores these ID values, at least nowadays, so we could use anything unique, including a simple counter. Use what Docker uses for cargo-cult consistency. 
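+		// v1IDFromBlobDigestAndComponents (defined below) joins the blob digest hex and the
+		// parent ID with spaces and hashes them with SHA-256, chaining each child ID to its parent.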
+ v, err := v1IDFromBlobDigestAndComponents(blobDigest, parentV1ID) + if err != nil { + return nil, err + } + v1ID = v + + fakeImage := manifest.Schema1V1Compatibility{ + ID: v1ID, + Parent: parentV1ID, + Comment: historyEntry.Comment, + Created: historyEntry.Created, + Author: historyEntry.Author, + ThrowAway: historyEntry.EmptyLayer, + } + fakeImage.ContainerConfig.Cmd = []string{historyEntry.CreatedBy} + v1CompatibilityBytes, err := json.Marshal(&fakeImage) + if err != nil { + return nil, errors.Errorf("Internal error: Error creating v1compatibility for %#v", fakeImage) + } + + fsLayers[v1Index] = manifest.Schema1FSLayers{BlobSum: blobDigest} + history[v1Index] = manifest.Schema1History{V1Compatibility: string(v1CompatibilityBytes)} + // Note that parentV1ID of the top layer is preserved when exiting this loop + } + + // Now patch in real configuration for the top layer (v1Index == 0) + v1ID, err = v1IDFromBlobDigestAndComponents(fsLayers[0].BlobSum, parentV1ID, string(configBytes)) // See above WRT v1ID value generation and cargo-cult consistency. + if err != nil { + return nil, err + } + v1Config, err := v1ConfigFromConfigJSON(configBytes, v1ID, parentV1ID, imageConfig.History[len(imageConfig.History)-1].EmptyLayer) + if err != nil { + return nil, err + } + history[0].V1Compatibility = string(v1Config) + + m1, err := manifestSchema1FromComponents(dest.Reference().DockerReference(), fsLayers, history, imageConfig.Architecture) + if err != nil { + return nil, err // This should never happen, we should have created all the components correctly. + } + return memoryImageFromManifest(m1), nil +} + +func v1IDFromBlobDigestAndComponents(blobDigest digest.Digest, others ...string) (string, error) { + if err := blobDigest.Validate(); err != nil { + return "", err + } + parts := append([]string{blobDigest.Hex()}, others...) + v1IDHash := sha256.Sum256([]byte(strings.Join(parts, " "))) + return hex.EncodeToString(v1IDHash[:]), nil +} + +func v1ConfigFromConfigJSON(configJSON []byte, v1ID, parentV1ID string, throwaway bool) ([]byte, error) { + // Preserve everything we don't specifically know about. + // (This must be a *json.RawMessage, even though *[]byte is fairly redundant, because only *RawMessage implements json.Marshaler.) + rawContents := map[string]*json.RawMessage{} + if err := json.Unmarshal(configJSON, &rawContents); err != nil { // We have already unmarshaled it before, using a more detailed schema?! + return nil, err + } + delete(rawContents, "rootfs") + delete(rawContents, "history") + + updates := map[string]interface{}{"id": v1ID} + if parentV1ID != "" { + updates["parent"] = parentV1ID + } + if throwaway { + updates["throwaway"] = throwaway + } + for field, value := range updates { + encoded, err := json.Marshal(value) + if err != nil { + return nil, err + } + rawContents[field] = (*json.RawMessage)(&encoded) + } + return json.Marshal(rawContents) +} diff --git a/vendor/github.com/containers/image/image/manifest.go b/vendor/github.com/containers/image/image/manifest.go new file mode 100644 index 0000000..c5ca5b1 --- /dev/null +++ b/vendor/github.com/containers/image/image/manifest.go @@ -0,0 +1,73 @@ +package image + +import ( + "context" + "fmt" + + "github.com/containers/image/docker/reference" + "github.com/containers/image/manifest" + "github.com/containers/image/types" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" +) + +// genericManifest is an interface for parsing, modifying image manifests and related data. 
+// Note that the public methods are intended to be a subset of types.Image
+// so that embedding a genericManifest into structs works.
+type genericManifest interface {
+	serialize() ([]byte, error)
+	manifestMIMEType() string
+	// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
+	// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below.
+	ConfigInfo() types.BlobInfo
+	// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise.
+	// The result is cached; it is OK to call this however often you need.
+	ConfigBlob(context.Context) ([]byte, error)
+	// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about
+	// layers in the resulting configuration isn't guaranteed to be returned due to how
+	// old image manifests work (docker v2s1 especially).
+	OCIConfig(context.Context) (*imgspecv1.Image, error)
+	// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
+	// The Digest field is guaranteed to be provided; Size may be -1.
+	// WARNING: The list may contain duplicates, and they are semantically relevant.
+	LayerInfos() []types.BlobInfo
+	// EmbeddedDockerReferenceConflicts returns whether a Docker reference embedded in the manifest, if any, conflicts with destination ref.
+	// It returns false if the manifest does not embed a Docker reference.
+	// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.)
+	EmbeddedDockerReferenceConflicts(ref reference.Named) bool
+	// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
+	Inspect(context.Context) (*types.ImageInspectInfo, error)
+	// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs.
+	// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive
+	// (most importantly it forces us to download the full layers even if they are already present at the destination).
+	UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool
+	// UpdatedImage returns a types.Image modified according to options.
+	// This does not change the state of the original Image object.
+	UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error)
+}
+
+// manifestInstanceFromBlob returns a genericManifest implementation for (manblob, mt) in src.
+// If manblob is a manifest list, it implicitly chooses an appropriate image from the list.
+func manifestInstanceFromBlob(ctx context.Context, sys *types.SystemContext, src types.ImageSource, manblob []byte, mt string) (genericManifest, error) {
+	switch manifest.NormalizedMIMEType(mt) {
+	case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType:
+		return manifestSchema1FromManifest(manblob)
+	case imgspecv1.MediaTypeImageManifest:
+		return manifestOCI1FromManifest(src, manblob)
+	case manifest.DockerV2Schema2MediaType:
+		return manifestSchema2FromManifest(src, manblob)
+	case manifest.DockerV2ListMediaType:
+		return manifestSchema2FromManifestList(ctx, sys, src, manblob)
+	default: // Note that this may not be reachable, manifest.NormalizedMIMEType has a default for unknown values.
+ return nil, fmt.Errorf("Unimplemented manifest MIME type %s", mt) + } +} + +// manifestLayerInfosToBlobInfos extracts a []types.BlobInfo from a []manifest.LayerInfo. +func manifestLayerInfosToBlobInfos(layers []manifest.LayerInfo) []types.BlobInfo { + blobs := make([]types.BlobInfo, len(layers)) + for i, layer := range layers { + blobs[i] = layer.BlobInfo + } + return blobs +} diff --git a/vendor/github.com/containers/image/image/memory.go b/vendor/github.com/containers/image/image/memory.go new file mode 100644 index 0000000..0be69ec --- /dev/null +++ b/vendor/github.com/containers/image/image/memory.go @@ -0,0 +1,65 @@ +package image + +import ( + "context" + + "github.com/pkg/errors" + + "github.com/containers/image/types" +) + +// memoryImage is a mostly-implementation of types.Image assembled from data +// created in memory, used primarily as a return value of types.Image.UpdatedImage +// as a way to carry various structured information in a type-safe and easy-to-use way. +// Note that this _only_ carries the immediate metadata; it is _not_ a stand-alone +// collection of all related information, e.g. there is no way to get layer blobs +// from a memoryImage. +type memoryImage struct { + genericManifest + serializedManifest []byte // A private cache for Manifest() +} + +func memoryImageFromManifest(m genericManifest) types.Image { + return &memoryImage{ + genericManifest: m, + serializedManifest: nil, + } +} + +// Reference returns the reference used to set up this source, _as specified by the user_ +// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. +func (i *memoryImage) Reference() types.ImageReference { + // It would really be inappropriate to return the ImageReference of the image this was based on. + return nil +} + +// Size returns the size of the image as stored, if known, or -1 if not. +func (i *memoryImage) Size() (int64, error) { + return -1, nil +} + +// Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need. +func (i *memoryImage) Manifest(ctx context.Context) ([]byte, string, error) { + if i.serializedManifest == nil { + m, err := i.genericManifest.serialize() + if err != nil { + return nil, "", err + } + i.serializedManifest = m + } + return i.serializedManifest, i.genericManifest.manifestMIMEType(), nil +} + +// Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need. +func (i *memoryImage) Signatures(ctx context.Context) ([][]byte, error) { + // Modifying an image invalidates signatures; a caller asking the updated image for signatures + // is probably confused. + return nil, errors.New("Internal error: Image.Signatures() is not supported for images modified in memory") +} + +// LayerInfosForCopy returns an updated set of layer blob information which may not match the manifest. +// The Digest field is guaranteed to be provided; Size may be -1. +// WARNING: The list may contain duplicates, and they are semantically relevant. 
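+// For a memoryImage there is no underlying source to consult, so the implementation returns nil, i.e. the manifest's own layer list applies unchanged.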
+func (i *memoryImage) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) {
+	return nil, nil
+}
diff --git a/vendor/github.com/containers/image/image/oci.go b/vendor/github.com/containers/image/image/oci.go
new file mode 100644
index 0000000..cdff26e
--- /dev/null
+++ b/vendor/github.com/containers/image/image/oci.go
@@ -0,0 +1,198 @@
+package image
+
+import (
+	"context"
+	"encoding/json"
+	"io/ioutil"
+
+	"github.com/containers/image/docker/reference"
+	"github.com/containers/image/manifest"
+	"github.com/containers/image/pkg/blobinfocache/none"
+	"github.com/containers/image/types"
+	"github.com/opencontainers/go-digest"
+	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+)
+
+type manifestOCI1 struct {
+	src        types.ImageSource // May be nil if configBlob is not nil
+	configBlob []byte            // If set, corresponds to contents of m.Config.
+	m          *manifest.OCI1
+}
+
+func manifestOCI1FromManifest(src types.ImageSource, manifestBlob []byte) (genericManifest, error) {
+	m, err := manifest.OCI1FromManifest(manifestBlob)
+	if err != nil {
+		return nil, err
+	}
+	return &manifestOCI1{
+		src: src,
+		m:   m,
+	}, nil
+}
+
+// manifestOCI1FromComponents builds a new manifestOCI1 from the supplied data:
+func manifestOCI1FromComponents(config imgspecv1.Descriptor, src types.ImageSource, configBlob []byte, layers []imgspecv1.Descriptor) genericManifest {
+	return &manifestOCI1{
+		src:        src,
+		configBlob: configBlob,
+		m:          manifest.OCI1FromComponents(config, layers),
+	}
+}
+
+func (m *manifestOCI1) serialize() ([]byte, error) {
+	return m.m.Serialize()
+}
+
+func (m *manifestOCI1) manifestMIMEType() string {
+	return imgspecv1.MediaTypeImageManifest
+}
+
+// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
+// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below.
+func (m *manifestOCI1) ConfigInfo() types.BlobInfo {
+	return m.m.ConfigInfo()
+}
+
+// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise.
+// The result is cached; it is OK to call this however often you need.
+func (m *manifestOCI1) ConfigBlob(ctx context.Context) ([]byte, error) {
+	if m.configBlob == nil {
+		if m.src == nil {
+			return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestOCI1")
+		}
+		stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromOCI1Descriptor(m.m.Config), none.NoCache)
+		if err != nil {
+			return nil, err
+		}
+		defer stream.Close()
+		blob, err := ioutil.ReadAll(stream)
+		if err != nil {
+			return nil, err
+		}
+		computedDigest := digest.FromBytes(blob)
+		if computedDigest != m.m.Config.Digest {
+			return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.m.Config.Digest)
+		}
+		m.configBlob = blob
+	}
+	return m.configBlob, nil
+}
+
+// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about
+// layers in the resulting configuration isn't guaranteed to be returned due to how
+// old image manifests work (docker v2s1 especially).
+func (m *manifestOCI1) OCIConfig(ctx context.Context) (*imgspecv1.Image, error) {
+	cb, err := m.ConfigBlob(ctx)
+	if err != nil {
+		return nil, err
+	}
+	configOCI := &imgspecv1.Image{}
+	if err := json.Unmarshal(cb, configOCI); err != nil {
+		return nil, err
+	}
+	return configOCI, nil
+}
+
+// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (m *manifestOCI1) LayerInfos() []types.BlobInfo {
+	return manifestLayerInfosToBlobInfos(m.m.LayerInfos())
+}
+
+// EmbeddedDockerReferenceConflicts returns whether a Docker reference embedded in the manifest, if any, conflicts with destination ref.
+// It returns false if the manifest does not embed a Docker reference.
+// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.)
+func (m *manifestOCI1) EmbeddedDockerReferenceConflicts(ref reference.Named) bool {
+	return false
+}
+
+// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
+func (m *manifestOCI1) Inspect(ctx context.Context) (*types.ImageInspectInfo, error) {
+	getter := func(info types.BlobInfo) ([]byte, error) {
+		if info.Digest != m.ConfigInfo().Digest {
+			// Shouldn't ever happen
+			return nil, errors.New("asked for a different config blob")
+		}
+		config, err := m.ConfigBlob(ctx)
+		if err != nil {
+			return nil, err
+		}
+		return config, nil
+	}
+	return m.m.Inspect(getter)
+}
+
+// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs.
+// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive
+// (most importantly it forces us to download the full layers even if they are already present at the destination).
+func (m *manifestOCI1) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool {
+	return false
+}
+
+// UpdatedImage returns a types.Image modified according to options.
+// This does not change the state of the original Image object.
+func (m *manifestOCI1) UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) {
+	copy := manifestOCI1{ // NOTE: This is not a deep copy, it still shares slices etc.
+		src:        m.src,
+		configBlob: m.configBlob,
+		m:          manifest.OCI1Clone(m.m),
+	}
+	if options.LayerInfos != nil {
+		if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil {
+			return nil, err
+		}
+	}
+	// Ignore options.EmbeddedDockerReference: it may be set when converting from schema1, but we really don't care.
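+	// Conversion strategy: OCI -> schema2 is a direct media-type rewrite (see convertToManifestSchema2
+	// below); OCI -> schema1 is handled transitively through that same schema2 form.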
+
+	switch options.ManifestMIMEType {
+	case "": // No conversion, OK
+	case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType:
+		// We can't directly convert to V1, but we can transitively convert via a V2 image
+		m2, err := copy.convertToManifestSchema2()
+		if err != nil {
+			return nil, err
+		}
+		return m2.UpdatedImage(ctx, types.ManifestUpdateOptions{
+			ManifestMIMEType: options.ManifestMIMEType,
+			InformationOnly:  options.InformationOnly,
+		})
+	case manifest.DockerV2Schema2MediaType:
+		return copy.convertToManifestSchema2()
+	default:
+		return nil, errors.Errorf("Conversion of image manifest from %s to %s is not implemented", imgspecv1.MediaTypeImageManifest, options.ManifestMIMEType)
+	}
+
+	return memoryImageFromManifest(&copy), nil
+}
+
+func schema2DescriptorFromOCI1Descriptor(d imgspecv1.Descriptor) manifest.Schema2Descriptor {
+	return manifest.Schema2Descriptor{
+		MediaType: d.MediaType,
+		Size:      d.Size,
+		Digest:    d.Digest,
+		URLs:      d.URLs,
+	}
+}
+
+func (m *manifestOCI1) convertToManifestSchema2() (types.Image, error) {
+	// Create a copy of the descriptor.
+	config := schema2DescriptorFromOCI1Descriptor(m.m.Config)
+
+	// The only difference between OCI and DockerSchema2 is the mediatypes. The
+	// media type of the manifest is handled by manifestSchema2FromComponents.
+	config.MediaType = manifest.DockerV2Schema2ConfigMediaType
+
+	layers := make([]manifest.Schema2Descriptor, len(m.m.Layers))
+	for idx := range layers {
+		layers[idx] = schema2DescriptorFromOCI1Descriptor(m.m.Layers[idx])
+		layers[idx].MediaType = manifest.DockerV2Schema2LayerMediaType
+	}
+
+	// Rather than copying the ConfigBlob now, we just pass m.src to the
+	// translated manifest, since the only difference is the mediatype of
+	// descriptors there is no change to any blob stored in m.src.
+	m1 := manifestSchema2FromComponents(config, m.src, nil, layers)
+	return memoryImageFromManifest(m1), nil
+}
diff --git a/vendor/github.com/containers/image/image/sourced.go b/vendor/github.com/containers/image/image/sourced.go
new file mode 100644
index 0000000..01cc28b
--- /dev/null
+++ b/vendor/github.com/containers/image/image/sourced.go
@@ -0,0 +1,103 @@
+// Package image consolidates knowledge about various container image formats
+// (as opposed to image storage mechanisms, which are handled by types.ImageSource)
+// and exposes all of them using a unified interface.
+package image
+
+import (
+	"context"
+	"github.com/containers/image/types"
+)
+
+// imageCloser implements types.ImageCloser, perhaps allowing simple users
+// to use a single object without having to keep a reference to a types.ImageSource
+// only to call types.ImageSource.Close().
+type imageCloser struct {
+	types.Image
+	src types.ImageSource
+}
+
+// FromSource returns a types.ImageCloser implementation for the default instance of source.
+// If source is a manifest list, .Manifest() still returns the manifest list,
+// but other methods transparently return data from an appropriate image instance.
+//
+// The caller must call .Close() on the returned ImageCloser.
+//
+// FromSource “takes ownership” of the input ImageSource and will call src.Close()
+// when the image is closed. (This does not prevent callers from using both the
+// Image and ImageSource objects simultaneously, but it means that they only need to close
+// the Image.)
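+//
+// A sketched call sequence (src creation and error handling elided, names illustrative):
+//
+//	img, err := image.FromSource(ctx, sys, src)
+//	if err != nil {
+//		return err
+//	}
+//	defer img.Close()
+//	manifestBlob, mimeType, err := img.Manifest(ctx)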
+// +// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, +// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage instead of calling this function. +func FromSource(ctx context.Context, sys *types.SystemContext, src types.ImageSource) (types.ImageCloser, error) { + img, err := FromUnparsedImage(ctx, sys, UnparsedInstance(src, nil)) + if err != nil { + return nil, err + } + return &imageCloser{ + Image: img, + src: src, + }, nil +} + +func (ic *imageCloser) Close() error { + return ic.src.Close() +} + +// sourcedImage is a general set of utilities for working with container images, +// whatever is their underlying location (i.e. dockerImageSource-independent). +// Note the existence of skopeo/docker.Image: some instances of a `types.Image` +// may not be a `sourcedImage` directly. However, most users of `types.Image` +// do not care, and those who care about `skopeo/docker.Image` know they do. +type sourcedImage struct { + *UnparsedImage + manifestBlob []byte + manifestMIMEType string + // genericManifest contains data corresponding to manifestBlob. + // NOTE: The manifest may have been modified in the process; DO NOT reserialize and store genericManifest + // if you want to preserve the original manifest; use manifestBlob directly. + genericManifest +} + +// FromUnparsedImage returns a types.Image implementation for unparsed. +// If unparsed represents a manifest list, .Manifest() still returns the manifest list, +// but other methods transparently return data from an appropriate single image. +// +// The Image must not be used after the underlying ImageSource is Close()d. +func FromUnparsedImage(ctx context.Context, sys *types.SystemContext, unparsed *UnparsedImage) (types.Image, error) { + // Note that the input parameter above is specifically *image.UnparsedImage, not types.UnparsedImage: + // we want to be able to use unparsed.src. We could make that an explicit interface, but, well, + // this is the only UnparsedImage implementation around, anyway. + + // NOTE: It is essential for signature verification that all parsing done in this object happens on the same manifest which is returned by unparsed.Manifest(). + manifestBlob, manifestMIMEType, err := unparsed.Manifest(ctx) + if err != nil { + return nil, err + } + + parsedManifest, err := manifestInstanceFromBlob(ctx, sys, unparsed.src, manifestBlob, manifestMIMEType) + if err != nil { + return nil, err + } + + return &sourcedImage{ + UnparsedImage: unparsed, + manifestBlob: manifestBlob, + manifestMIMEType: manifestMIMEType, + genericManifest: parsedManifest, + }, nil +} + +// Size returns the size of the image as stored, if it's known, or -1 if it isn't. +func (i *sourcedImage) Size() (int64, error) { + return -1, nil +} + +// Manifest overrides the UnparsedImage.Manifest to always use the fields which we have already fetched. 
+func (i *sourcedImage) Manifest(ctx context.Context) ([]byte, string, error) {
+	return i.manifestBlob, i.manifestMIMEType, nil
+}
+
+func (i *sourcedImage) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) {
+	return i.UnparsedImage.src.LayerInfosForCopy(ctx)
+}
diff --git a/vendor/github.com/containers/image/image/unparsed.go b/vendor/github.com/containers/image/image/unparsed.go
new file mode 100644
index 0000000..2c9280d
--- /dev/null
+++ b/vendor/github.com/containers/image/image/unparsed.go
@@ -0,0 +1,95 @@
+package image
+
+import (
+	"context"
+
+	"github.com/containers/image/docker/reference"
+	"github.com/containers/image/manifest"
+	"github.com/containers/image/types"
+	"github.com/opencontainers/go-digest"
+	"github.com/pkg/errors"
+)
+
+// UnparsedImage implements types.UnparsedImage.
+// An UnparsedImage is a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance.
+type UnparsedImage struct {
+	src            types.ImageSource
+	instanceDigest *digest.Digest
+	cachedManifest []byte // A private cache for Manifest(); nil if not yet known.
+	// A private cache for Manifest(), may be the empty string if guessing failed.
+	// Valid iff cachedManifest is not nil.
+	cachedManifestMIMEType string
+	cachedSignatures       [][]byte // A private cache for Signatures(); nil if not yet known.
+}
+
+// UnparsedInstance returns a types.UnparsedImage implementation for (source, instanceDigest).
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list).
+//
+// The UnparsedImage must not be used after the underlying ImageSource is Close()d.
+func UnparsedInstance(src types.ImageSource, instanceDigest *digest.Digest) *UnparsedImage {
+	return &UnparsedImage{
+		src:            src,
+		instanceDigest: instanceDigest,
+	}
+}
+
+// Reference returns the reference used to set up this source, _as specified by the user_
+// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
+func (i *UnparsedImage) Reference() types.ImageReference {
+	// Note that this does not depend on instanceDigest; e.g. all instances within a manifest list need to be signed with the manifest list identity.
+	return i.src.Reference()
+}
+
+// Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need.
+func (i *UnparsedImage) Manifest(ctx context.Context) ([]byte, string, error) {
+	if i.cachedManifest == nil {
+		m, mt, err := i.src.GetManifest(ctx, i.instanceDigest)
+		if err != nil {
+			return nil, "", err
+		}
+
+		// ImageSource.GetManifest does not do digest verification, but we do;
+		// this immediately protects also any user of types.Image.
+		if digest, haveDigest := i.expectedManifestDigest(); haveDigest {
+			matches, err := manifest.MatchesDigest(m, digest)
+			if err != nil {
+				return nil, "", errors.Wrap(err, "Error computing manifest digest")
+			}
+			if !matches {
+				return nil, "", errors.Errorf("Manifest does not match provided manifest digest %s", digest)
+			}
+		}
+
+		i.cachedManifest = m
+		i.cachedManifestMIMEType = mt
+	}
+	return i.cachedManifest, i.cachedManifestMIMEType, nil
+}
+
+// expectedManifestDigest returns the expected value of the manifest digest, and an indicator whether it is known.
+// The bool return value seems redundant with digest != ""; it is used explicitly
+// to refuse (unexpected) situations when the digest exists but is "".
+func (i *UnparsedImage) expectedManifestDigest() (digest.Digest, bool) { + if i.instanceDigest != nil { + return *i.instanceDigest, true + } + ref := i.Reference().DockerReference() + if ref != nil { + if canonical, ok := ref.(reference.Canonical); ok { + return canonical.Digest(), true + } + } + return "", false +} + +// Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need. +func (i *UnparsedImage) Signatures(ctx context.Context) ([][]byte, error) { + if i.cachedSignatures == nil { + sigs, err := i.src.GetSignatures(ctx, i.instanceDigest) + if err != nil { + return nil, err + } + i.cachedSignatures = sigs + } + return i.cachedSignatures, nil +} diff --git a/vendor/github.com/containers/image/internal/tmpdir/tmpdir.go b/vendor/github.com/containers/image/internal/tmpdir/tmpdir.go new file mode 100644 index 0000000..8c77692 --- /dev/null +++ b/vendor/github.com/containers/image/internal/tmpdir/tmpdir.go @@ -0,0 +1,29 @@ +package tmpdir + +import ( + "os" + "runtime" +) + +// unixTempDirForBigFiles is the directory path to store big files on non Windows systems. +// You can override this at build time with +// -ldflags '-X github.com/containers/image/internal/tmpdir.unixTempDirForBigFiles=$your_path' +var unixTempDirForBigFiles = builtinUnixTempDirForBigFiles + +// builtinUnixTempDirForBigFiles is the directory path to store big files. +// Do not use the system default of os.TempDir(), usually /tmp, because with systemd it could be a tmpfs. +// DO NOT change this, instead see unixTempDirForBigFiles above. +const builtinUnixTempDirForBigFiles = "/var/tmp" + +// TemporaryDirectoryForBigFiles returns a directory for temporary (big) files. +// On non Windows systems it avoids the use of os.TempDir(), because the default temporary directory usually falls under /tmp +// which on systemd based systems could be the unsuitable tmpfs filesystem. +func TemporaryDirectoryForBigFiles() string { + var temporaryDirectoryForBigFiles string + if runtime.GOOS == "windows" { + temporaryDirectoryForBigFiles = os.TempDir() + } else { + temporaryDirectoryForBigFiles = unixTempDirForBigFiles + } + return temporaryDirectoryForBigFiles +} diff --git a/vendor/github.com/containers/image/manifest/docker_schema1.go b/vendor/github.com/containers/image/manifest/docker_schema1.go new file mode 100644 index 0000000..1ba65d5 --- /dev/null +++ b/vendor/github.com/containers/image/manifest/docker_schema1.go @@ -0,0 +1,315 @@ +package manifest + +import ( + "encoding/json" + "regexp" + "strings" + "time" + + "github.com/containers/image/docker/reference" + "github.com/containers/image/types" + "github.com/docker/docker/api/types/versions" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +// Schema1FSLayers is an entry of the "fsLayers" array in docker/distribution schema 1. +type Schema1FSLayers struct { + BlobSum digest.Digest `json:"blobSum"` +} + +// Schema1History is an entry of the "history" array in docker/distribution schema 1. +type Schema1History struct { + V1Compatibility string `json:"v1Compatibility"` +} + +// Schema1 is a manifest in docker/distribution schema 1. +type Schema1 struct { + Name string `json:"name"` + Tag string `json:"tag"` + Architecture string `json:"architecture"` + FSLayers []Schema1FSLayers `json:"fsLayers"` + History []Schema1History `json:"history"` // Keep this in sync with ExtractedV1Compatibility! + ExtractedV1Compatibility []Schema1V1Compatibility `json:"-"` // Keep this in sync with History! 
Does not contain the full config (Schema2V1Image)
+	SchemaVersion            int                      `json:"schemaVersion"`
+}
+
+type schema1V1CompatibilityContainerConfig struct {
+	Cmd []string
+}
+
+// Schema1V1Compatibility is a v1Compatibility in docker/distribution schema 1.
+type Schema1V1Compatibility struct {
+	ID              string                                `json:"id"`
+	Parent          string                                `json:"parent,omitempty"`
+	Comment         string                                `json:"comment,omitempty"`
+	Created         time.Time                             `json:"created"`
+	ContainerConfig schema1V1CompatibilityContainerConfig `json:"container_config,omitempty"`
+	Author          string                                `json:"author,omitempty"`
+	ThrowAway       bool                                  `json:"throwaway,omitempty"`
+}
+
+// Schema1FromManifest creates a Schema1 manifest instance from a manifest blob.
+// (NOTE: The instance is not necessarily a literal representation of the original blob;
+// layers with duplicate IDs are eliminated.)
+func Schema1FromManifest(manifest []byte) (*Schema1, error) {
+	s1 := Schema1{}
+	if err := json.Unmarshal(manifest, &s1); err != nil {
+		return nil, err
+	}
+	if s1.SchemaVersion != 1 {
+		return nil, errors.Errorf("unsupported schema version %d", s1.SchemaVersion)
+	}
+	if err := s1.initialize(); err != nil {
+		return nil, err
+	}
+	if err := s1.fixManifestLayers(); err != nil {
+		return nil, err
+	}
+	return &s1, nil
+}
+
+// Schema1FromComponents creates a Schema1 manifest instance from the supplied data.
+func Schema1FromComponents(ref reference.Named, fsLayers []Schema1FSLayers, history []Schema1History, architecture string) (*Schema1, error) {
+	var name, tag string
+	if ref != nil { // Well, what to do if it _is_ nil? Most consumers actually don't use these fields nowadays, so we might as well try not supplying them.
+		name = reference.Path(ref)
+		if tagged, ok := ref.(reference.NamedTagged); ok {
+			tag = tagged.Tag()
+		}
+	}
+	s1 := Schema1{
+		Name:          name,
+		Tag:           tag,
+		Architecture:  architecture,
+		FSLayers:      fsLayers,
+		History:       history,
+		SchemaVersion: 1,
+	}
+	if err := s1.initialize(); err != nil {
+		return nil, err
+	}
+	return &s1, nil
+}
+
+// Schema1Clone creates a copy of the supplied Schema1 manifest.
+func Schema1Clone(src *Schema1) *Schema1 {
+	copy := *src
+	return &copy
+}
+
+// initialize initializes ExtractedV1Compatibility and verifies invariants, so that the rest of this code can assume a minimally healthy manifest.
+func (m *Schema1) initialize() error {
+	if len(m.FSLayers) != len(m.History) {
+		return errors.New("length of history not equal to number of layers")
+	}
+	if len(m.FSLayers) == 0 {
+		return errors.New("no FSLayers in manifest")
+	}
+	m.ExtractedV1Compatibility = make([]Schema1V1Compatibility, len(m.History))
+	for i, h := range m.History {
+		if err := json.Unmarshal([]byte(h.V1Compatibility), &m.ExtractedV1Compatibility[i]); err != nil {
+			return errors.Wrapf(err, "Error parsing v2s1 history entry %d", i)
+		}
+	}
+	return nil
+}
+
+// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
+func (m *Schema1) ConfigInfo() types.BlobInfo {
+	return types.BlobInfo{}
+}
+
+// LayerInfos returns a list of LayerInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (m *Schema1) LayerInfos() []LayerInfo { + layers := make([]LayerInfo, len(m.FSLayers)) + for i, layer := range m.FSLayers { // NOTE: This includes empty layers (where m.History.V1Compatibility->ThrowAway) + layers[(len(m.FSLayers)-1)-i] = LayerInfo{ + BlobInfo: types.BlobInfo{Digest: layer.BlobSum, Size: -1}, + EmptyLayer: m.ExtractedV1Compatibility[i].ThrowAway, + } + } + return layers +} + +// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers) +func (m *Schema1) UpdateLayerInfos(layerInfos []types.BlobInfo) error { + // Our LayerInfos includes empty layers (where m.ExtractedV1Compatibility[].ThrowAway), so expect them to be included here as well. + if len(m.FSLayers) != len(layerInfos) { + return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.FSLayers), len(layerInfos)) + } + m.FSLayers = make([]Schema1FSLayers, len(layerInfos)) + for i, info := range layerInfos { + // (docker push) sets up m.ExtractedV1Compatibility[].{Id,Parent} based on values of info.Digest, + // but (docker pull) ignores them in favor of computing DiffIDs from uncompressed data, except verifying the child->parent links and uniqueness. + // So, we don't bother recomputing the IDs in m.History.V1Compatibility. + m.FSLayers[(len(layerInfos)-1)-i].BlobSum = info.Digest + } + return nil +} + +// Serialize returns the manifest in a blob format. +// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made! +func (m *Schema1) Serialize() ([]byte, error) { + // docker/distribution requires a signature even if the incoming data uses the nominally unsigned DockerV2Schema1MediaType. + unsigned, err := json.Marshal(*m) + if err != nil { + return nil, err + } + return AddDummyV2S1Signature(unsigned) +} + +// fixManifestLayers, after validating the supplied manifest +// (to use correctly-formatted IDs, and to not have non-consecutive ID collisions in m.History), +// modifies manifest to only have one entry for each layer ID in m.History (deleting the older duplicates, +// both from m.History and m.FSLayers). +// Note that even after this succeeds, m.FSLayers may contain duplicate entries +// (for Dockerfile operations which change the configuration but not the filesystem). +func (m *Schema1) fixManifestLayers() error { + // m.initialize() has verified that len(m.FSLayers) == len(m.History) + for _, compat := range m.ExtractedV1Compatibility { + if err := validateV1ID(compat.ID); err != nil { + return err + } + } + if m.ExtractedV1Compatibility[len(m.ExtractedV1Compatibility)-1].Parent != "" { + return errors.New("Invalid parent ID in the base layer of the image") + } + // check general duplicates to error instead of a deadlock + idmap := make(map[string]struct{}) + var lastID string + for _, img := range m.ExtractedV1Compatibility { + // skip IDs that appear after each other, we handle those later + if _, exists := idmap[img.ID]; img.ID != lastID && exists { + return errors.Errorf("ID %+v appears multiple times in manifest", img.ID) + } + lastID = img.ID + idmap[lastID] = struct{}{} + } + // backwards loop so that we keep the remaining indexes after removing items + for i := len(m.ExtractedV1Compatibility) - 2; i >= 0; i-- { + if m.ExtractedV1Compatibility[i].ID == m.ExtractedV1Compatibility[i+1].ID { // repeated ID. 
remove and continue + m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...) + m.History = append(m.History[:i], m.History[i+1:]...) + m.ExtractedV1Compatibility = append(m.ExtractedV1Compatibility[:i], m.ExtractedV1Compatibility[i+1:]...) + } else if m.ExtractedV1Compatibility[i].Parent != m.ExtractedV1Compatibility[i+1].ID { + return errors.Errorf("Invalid parent ID. Expected %v, got %v", m.ExtractedV1Compatibility[i+1].ID, m.ExtractedV1Compatibility[i].Parent) + } + } + return nil +} + +var validHex = regexp.MustCompile(`^([a-f0-9]{64})$`) + +func validateV1ID(id string) error { + if ok := validHex.MatchString(id); !ok { + return errors.Errorf("image ID %q is invalid", id) + } + return nil +} + +// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. +func (m *Schema1) Inspect(_ func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) { + s1 := &Schema2V1Image{} + if err := json.Unmarshal([]byte(m.History[0].V1Compatibility), s1); err != nil { + return nil, err + } + i := &types.ImageInspectInfo{ + Tag: m.Tag, + Created: &s1.Created, + DockerVersion: s1.DockerVersion, + Architecture: s1.Architecture, + Os: s1.OS, + Layers: layerInfosToStrings(m.LayerInfos()), + } + if s1.Config != nil { + i.Labels = s1.Config.Labels + } + return i, nil +} + +// ToSchema2Config builds a schema2-style configuration blob using the supplied diffIDs. +func (m *Schema1) ToSchema2Config(diffIDs []digest.Digest) ([]byte, error) { + // Convert the schema 1 compat info into a schema 2 config, constructing some of the fields + // that aren't directly comparable using info from the manifest. + if len(m.History) == 0 { + return nil, errors.New("image has no layers") + } + s1 := Schema2V1Image{} + config := []byte(m.History[0].V1Compatibility) + err := json.Unmarshal(config, &s1) + if err != nil { + return nil, errors.Wrapf(err, "error decoding configuration") + } + // Images created with versions prior to 1.8.3 require us to re-encode the encoded object, + // adding some fields that aren't "omitempty". + if s1.DockerVersion != "" && versions.LessThan(s1.DockerVersion, "1.8.3") { + config, err = json.Marshal(&s1) + if err != nil { + return nil, errors.Wrapf(err, "error re-encoding compat image config %#v", s1) + } + } + // Build the history. + convertedHistory := []Schema2History{} + for _, compat := range m.ExtractedV1Compatibility { + hitem := Schema2History{ + Created: compat.Created, + CreatedBy: strings.Join(compat.ContainerConfig.Cmd, " "), + Author: compat.Author, + Comment: compat.Comment, + EmptyLayer: compat.ThrowAway, + } + convertedHistory = append([]Schema2History{hitem}, convertedHistory...) + } + // Build the rootfs information. We need the decompressed sums that we've been + // calculating to fill in the DiffIDs. It's expected (but not enforced by us) + // that the number of diffIDs corresponds to the number of non-EmptyLayer + // entries in the history. + rootFS := &Schema2RootFS{ + Type: "layers", + DiffIDs: diffIDs, + } + // And now for some raw manipulation. + raw := make(map[string]*json.RawMessage) + err = json.Unmarshal(config, &raw) + if err != nil { + return nil, errors.Wrapf(err, "error re-decoding compat image config %#v", s1) + } + // Drop some fields. + delete(raw, "id") + delete(raw, "parent") + delete(raw, "parent_id") + delete(raw, "layer_id") + delete(raw, "throwaway") + delete(raw, "Size") + // Add the history and rootfs information. 
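+	// (The result is the original compat config with the computed "rootfs" and "history" values spliced in.)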
+ rootfs, err := json.Marshal(rootFS) + if err != nil { + return nil, errors.Errorf("error encoding rootfs information %#v: %v", rootFS, err) + } + rawRootfs := json.RawMessage(rootfs) + raw["rootfs"] = &rawRootfs + history, err := json.Marshal(convertedHistory) + if err != nil { + return nil, errors.Errorf("error encoding history information %#v: %v", convertedHistory, err) + } + rawHistory := json.RawMessage(history) + raw["history"] = &rawHistory + // Encode the result. + config, err = json.Marshal(raw) + if err != nil { + return nil, errors.Errorf("error re-encoding compat image config %#v: %v", s1, err) + } + return config, nil +} + +// ImageID computes an ID which can uniquely identify this image by its contents. +func (m *Schema1) ImageID(diffIDs []digest.Digest) (string, error) { + image, err := m.ToSchema2Config(diffIDs) + if err != nil { + return "", err + } + return digest.FromBytes(image).Hex(), nil +} diff --git a/vendor/github.com/containers/image/manifest/docker_schema2.go b/vendor/github.com/containers/image/manifest/docker_schema2.go new file mode 100644 index 0000000..0671aed --- /dev/null +++ b/vendor/github.com/containers/image/manifest/docker_schema2.go @@ -0,0 +1,254 @@ +package manifest + +import ( + "encoding/json" + "time" + + "github.com/containers/image/pkg/strslice" + "github.com/containers/image/types" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +// Schema2Descriptor is a “descriptor” in docker/distribution schema 2. +type Schema2Descriptor struct { + MediaType string `json:"mediaType"` + Size int64 `json:"size"` + Digest digest.Digest `json:"digest"` + URLs []string `json:"urls,omitempty"` +} + +// BlobInfoFromSchema2Descriptor returns a types.BlobInfo based on the input schema 2 descriptor. +func BlobInfoFromSchema2Descriptor(desc Schema2Descriptor) types.BlobInfo { + return types.BlobInfo{ + Digest: desc.Digest, + Size: desc.Size, + URLs: desc.URLs, + MediaType: desc.MediaType, + } +} + +// Schema2 is a manifest in docker/distribution schema 2. +type Schema2 struct { + SchemaVersion int `json:"schemaVersion"` + MediaType string `json:"mediaType"` + ConfigDescriptor Schema2Descriptor `json:"config"` + LayersDescriptors []Schema2Descriptor `json:"layers"` +} + +// Schema2Port is a Port, a string containing port number and protocol in the +// format "80/tcp", from docker/go-connections/nat. +type Schema2Port string + +// Schema2PortSet is a PortSet, a collection of structs indexed by Port, from +// docker/go-connections/nat. +type Schema2PortSet map[Schema2Port]struct{} + +// Schema2HealthConfig is a HealthConfig, which holds configuration settings +// for the HEALTHCHECK feature, from docker/docker/api/types/container. +type Schema2HealthConfig struct { + // Test is the test to perform to check that the container is healthy. + // An empty slice means to inherit the default. + // The options are: + // {} : inherit healthcheck + // {"NONE"} : disable healthcheck + // {"CMD", args...} : exec arguments directly + // {"CMD-SHELL", command} : run command with system's default shell + Test []string `json:",omitempty"` + + // Zero means to inherit. Durations are expressed as integer nanoseconds. + StartPeriod time.Duration `json:",omitempty"` // StartPeriod is the time to wait after starting before running the first check. + Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. + Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. 
+
+	// Retries is the number of consecutive failures needed to consider a container as unhealthy.
+	// Zero means inherit.
+	Retries int `json:",omitempty"`
+}
+
+// Schema2Config is a Config in docker/docker/api/types/container.
+type Schema2Config struct {
+	Hostname        string               // Hostname
+	Domainname      string               // Domainname
+	User            string               // User that will run the command(s) inside the container; also supports user:group
+	AttachStdin     bool                 // Attach the standard input, makes possible user interaction
+	AttachStdout    bool                 // Attach the standard output
+	AttachStderr    bool                 // Attach the standard error
+	ExposedPorts    Schema2PortSet       `json:",omitempty"` // List of exposed ports
+	Tty             bool                 // Attach standard streams to a tty, including stdin if it is not closed.
+	OpenStdin       bool                 // Open stdin
+	StdinOnce       bool                 // If true, close stdin after the first attached client disconnects.
+	Env             []string             // List of environment variables to set in the container
+	Cmd             strslice.StrSlice    // Command to run when starting the container
+	Healthcheck     *Schema2HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy
+	ArgsEscaped     bool                 `json:",omitempty"` // True if command is already escaped (Windows specific)
+	Image           string               // Name of the image as it was passed by the operator (e.g. could be symbolic)
+	Volumes         map[string]struct{}  // List of volumes (mounts) used for the container
+	WorkingDir      string               // Current directory (PWD) in which the command will be launched
+	Entrypoint      strslice.StrSlice    // Entrypoint to run when starting the container
+	NetworkDisabled bool                 `json:",omitempty"` // Is network disabled
+	MacAddress      string               `json:",omitempty"` // MAC address of the container
+	OnBuild         []string             // ONBUILD metadata that was defined on the image Dockerfile
+	Labels          map[string]string    // List of labels set to this container
+	StopSignal      string               `json:",omitempty"` // Signal to stop a container
+	StopTimeout     *int                 `json:",omitempty"` // Timeout (in seconds) to stop a container
+	Shell           strslice.StrSlice    `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT
+}
+
+// Schema2V1Image is a V1Image in docker/docker/image.
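+// (Editorial note:) the same structure also describes the "v1Compatibility" JSON
+// embedded in schema 1 manifests, which is why Schema1.Inspect and
+// Schema1.ToSchema2Config above unmarshal into it.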
+type Schema2V1Image struct {
+	// ID is a unique 64-character identifier of the image
+	ID string `json:"id,omitempty"`
+	// Parent is the ID of the parent image
+	Parent string `json:"parent,omitempty"`
+	// Comment is the commit message that was set when committing the image
+	Comment string `json:"comment,omitempty"`
+	// Created is the timestamp at which the image was created
+	Created time.Time `json:"created"`
+	// Container is the ID of the container used to commit
+	Container string `json:"container,omitempty"`
+	// ContainerConfig is the configuration of the container that is committed into the image
+	ContainerConfig Schema2Config `json:"container_config,omitempty"`
+	// DockerVersion specifies the version of Docker that was used to build the image
+	DockerVersion string `json:"docker_version,omitempty"`
+	// Author is the name of the author that was specified when committing the image
+	Author string `json:"author,omitempty"`
+	// Config is the configuration of the container received from the client
+	Config *Schema2Config `json:"config,omitempty"`
+	// Architecture is the hardware that the image is built on and runs on
+	Architecture string `json:"architecture,omitempty"`
+	// OS is the operating system used to build and run the image
+	OS string `json:"os,omitempty"`
+	// Size is the total size of the image including all layers it is composed of
+	Size int64 `json:",omitempty"`
+}
+
+// Schema2RootFS is a description of how to build up an image's root filesystem, from docker/docker/image.
+type Schema2RootFS struct {
+	Type    string          `json:"type"`
+	DiffIDs []digest.Digest `json:"diff_ids,omitempty"`
+}
+
+// Schema2History stores build commands that were used to create an image, from docker/docker/image.
+type Schema2History struct {
+	// Created is the timestamp at which the image was created
+	Created time.Time `json:"created"`
+	// Author is the name of the author that was specified when committing the image
+	Author string `json:"author,omitempty"`
+	// CreatedBy keeps the Dockerfile command used while building the image
+	CreatedBy string `json:"created_by,omitempty"`
+	// Comment is the commit message that was set when committing the image
+	Comment string `json:"comment,omitempty"`
+	// EmptyLayer is set to true if this history item did not generate a
+	// layer. Otherwise, the history item is associated with the next
+	// layer in the RootFS section.
+	EmptyLayer bool `json:"empty_layer,omitempty"`
+}
+
+// Schema2Image is an Image in docker/docker/image.
+type Schema2Image struct {
+	Schema2V1Image
+	Parent     digest.Digest    `json:"parent,omitempty"`
+	RootFS     *Schema2RootFS   `json:"rootfs,omitempty"`
+	History    []Schema2History `json:"history,omitempty"`
+	OSVersion  string           `json:"os.version,omitempty"`
+	OSFeatures []string         `json:"os.features,omitempty"`
+}
+
+// Schema2FromManifest creates a Schema2 manifest instance from a manifest blob.
+func Schema2FromManifest(manifest []byte) (*Schema2, error) {
+	s2 := Schema2{}
+	if err := json.Unmarshal(manifest, &s2); err != nil {
+		return nil, err
+	}
+	return &s2, nil
+}
+
+// Schema2FromComponents creates a Schema2 manifest instance from the supplied data.
+func Schema2FromComponents(config Schema2Descriptor, layers []Schema2Descriptor) *Schema2 {
+	return &Schema2{
+		SchemaVersion:     2,
+		MediaType:         DockerV2Schema2MediaType,
+		ConfigDescriptor:  config,
+		LayersDescriptors: layers,
+	}
+}
+
+// Schema2Clone creates a copy of the supplied Schema2 manifest.
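+// (Editorial note:) the copy below is shallow: the LayersDescriptors slice header is
+// duplicated but its backing array is shared. That is safe here because
+// UpdateLayerInfos replaces the slice wholesale instead of mutating entries in place.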
+func Schema2Clone(src *Schema2) *Schema2 {
+	copy := *src
+	return &copy
+}
+
+// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
+func (m *Schema2) ConfigInfo() types.BlobInfo {
+	return BlobInfoFromSchema2Descriptor(m.ConfigDescriptor)
+}
+
+// LayerInfos returns a list of LayerInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (m *Schema2) LayerInfos() []LayerInfo {
+	blobs := []LayerInfo{}
+	for _, layer := range m.LayersDescriptors {
+		blobs = append(blobs, LayerInfo{
+			BlobInfo:   BlobInfoFromSchema2Descriptor(layer),
+			EmptyLayer: false,
+		})
+	}
+	return blobs
+}
+
+// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers)
+func (m *Schema2) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
+	if len(m.LayersDescriptors) != len(layerInfos) {
+		return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.LayersDescriptors), len(layerInfos))
+	}
+	original := m.LayersDescriptors
+	m.LayersDescriptors = make([]Schema2Descriptor, len(layerInfos))
+	for i, info := range layerInfos {
+		m.LayersDescriptors[i].MediaType = original[i].MediaType
+		m.LayersDescriptors[i].Digest = info.Digest
+		m.LayersDescriptors[i].Size = info.Size
+		m.LayersDescriptors[i].URLs = info.URLs
+	}
+	return nil
+}
+
+// Serialize returns the manifest in a blob format.
+// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made!
+func (m *Schema2) Serialize() ([]byte, error) {
+	return json.Marshal(*m)
+}
+
+// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
+func (m *Schema2) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) {
+	config, err := configGetter(m.ConfigInfo())
+	if err != nil {
+		return nil, err
+	}
+	s2 := &Schema2Image{}
+	if err := json.Unmarshal(config, s2); err != nil {
+		return nil, err
+	}
+	i := &types.ImageInspectInfo{
+		Tag:           "",
+		Created:       &s2.Created,
+		DockerVersion: s2.DockerVersion,
+		Architecture:  s2.Architecture,
+		Os:            s2.OS,
+		Layers:        layerInfosToStrings(m.LayerInfos()),
+	}
+	if s2.Config != nil {
+		i.Labels = s2.Config.Labels
+	}
+	return i, nil
+}
+
+// ImageID computes an ID which can uniquely identify this image by its contents.
+func (m *Schema2) ImageID([]digest.Digest) (string, error) {
+	if err := m.ConfigDescriptor.Digest.Validate(); err != nil {
+		return "", err
+	}
+	return m.ConfigDescriptor.Digest.Hex(), nil
+}
diff --git a/vendor/github.com/containers/image/manifest/manifest.go b/vendor/github.com/containers/image/manifest/manifest.go
new file mode 100644
index 0000000..ae1921b
--- /dev/null
+++ b/vendor/github.com/containers/image/manifest/manifest.go
@@ -0,0 +1,244 @@
+package manifest
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"github.com/containers/image/types"
+	"github.com/docker/libtrust"
+	"github.com/opencontainers/go-digest"
+	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// FIXME: Should we just use docker/distribution and docker/docker implementations directly?
+
+// FIXME(runcom, mitr): should we have a mediatype pkg??
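+// (Editorial sketch, not upstream code: a typical round trip through this package,
+// assuming manblob holds a raw manifest blob fetched elsewhere.)
+//
+//	mt := GuessMIMEType(manblob)    // "" if unrecognized
+//	m, err := FromBlob(manblob, mt) // parse into a Manifest
+//	if err == nil {
+//		for _, layer := range m.LayerInfos() {
+//			fmt.Println(layer.Digest)
+//		}
+//	}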
+const (
+	// DockerV2Schema1MediaType MIME type represents Docker manifest schema 1
+	DockerV2Schema1MediaType = "application/vnd.docker.distribution.manifest.v1+json"
+	// DockerV2Schema1SignedMediaType MIME type represents Docker manifest schema 1 with a JWS signature
+	DockerV2Schema1SignedMediaType = "application/vnd.docker.distribution.manifest.v1+prettyjws"
+	// DockerV2Schema2MediaType MIME type represents Docker manifest schema 2
+	DockerV2Schema2MediaType = "application/vnd.docker.distribution.manifest.v2+json"
+	// DockerV2Schema2ConfigMediaType is the MIME type used for schema 2 config blobs.
+	DockerV2Schema2ConfigMediaType = "application/vnd.docker.container.image.v1+json"
+	// DockerV2Schema2LayerMediaType is the MIME type used for schema 2 layers.
+	DockerV2Schema2LayerMediaType = "application/vnd.docker.image.rootfs.diff.tar.gzip"
+	// DockerV2ListMediaType MIME type represents Docker manifest schema 2 list
+	DockerV2ListMediaType = "application/vnd.docker.distribution.manifest.list.v2+json"
+	// DockerV2Schema2ForeignLayerMediaType is the MIME type used for schema 2 foreign layers.
+	DockerV2Schema2ForeignLayerMediaType = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip"
+)
+
+// DefaultRequestedManifestMIMETypes is a list of MIME types a types.ImageSource
+// should request from the backend unless directed otherwise.
+var DefaultRequestedManifestMIMETypes = []string{
+	imgspecv1.MediaTypeImageManifest,
+	DockerV2Schema2MediaType,
+	DockerV2Schema1SignedMediaType,
+	DockerV2Schema1MediaType,
+	DockerV2ListMediaType,
+}
+
+// Manifest is an interface for parsing and modifying image manifests in isolation.
+// Callers can either use this abstract interface without understanding the details of the formats,
+// or instantiate a specific implementation (e.g. manifest.OCI1) and access the public members
+// directly.
+//
+// See types.Image for functionality not limited to manifests, including format conversions and config parsing.
+// This interface is similar to, but not strictly equivalent to, the equivalent methods in types.Image.
+type Manifest interface {
+	// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
+	ConfigInfo() types.BlobInfo
+	// LayerInfos returns a list of LayerInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
+	// The Digest field is guaranteed to be provided; Size may be -1.
+	// WARNING: The list may contain duplicates, and they are semantically relevant.
+	LayerInfos() []LayerInfo
+	// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers)
+	UpdateLayerInfos(layerInfos []types.BlobInfo) error
+
+	// ImageID computes an ID which can uniquely identify this image by its contents, irrespective
+	// of which (of possibly more than one simultaneously valid) reference was used to locate the
+	// image, and unchanged by whether or how the layers are compressed. The result takes the form
+	// of the hexadecimal portion of a digest.Digest.
+	ImageID(diffIDs []digest.Digest) (string, error)
+
+	// Inspect returns various information for (skopeo inspect) parsed from the manifest,
+	// incorporating information from a configuration blob returned by configGetter, if
+	// the underlying image format is expected to include a configuration blob.
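+	// (Editorial sketch: a configGetter is typically a closure over an image
+	// source; fetchBlob below is a hypothetical helper.)
+	//
+	//	cfg := func(info types.BlobInfo) ([]byte, error) { return fetchBlob(info.Digest) }
+	//	inspect, err := m.Inspect(cfg)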
+	Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error)
+
+	// Serialize returns the manifest in a blob format.
+	// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made!
+	Serialize() ([]byte, error)
+}
+
+// LayerInfo is an extended version of types.BlobInfo for low-level users of Manifest.LayerInfos.
+type LayerInfo struct {
+	types.BlobInfo
+	EmptyLayer bool // The layer is an “empty”/“throwaway” one, and may or may not be physically represented in various transport / storage systems. false if the manifest type does not have the concept.
+}
+
+// GuessMIMEType guesses MIME type of a manifest and returns it _if it is recognized_, or "" if unknown or unrecognized.
+// FIXME? We should, in general, prefer out-of-band MIME type instead of blindly parsing the manifest,
+// but we may not have such metadata available (e.g. when the manifest is a local file).
+func GuessMIMEType(manifest []byte) string {
+	// A subset of manifest fields; the rest is silently ignored by json.Unmarshal.
+	// Also docker/distribution/manifest.Versioned.
+	meta := struct {
+		MediaType     string      `json:"mediaType"`
+		SchemaVersion int         `json:"schemaVersion"`
+		Signatures    interface{} `json:"signatures"`
+	}{}
+	if err := json.Unmarshal(manifest, &meta); err != nil {
+		return ""
+	}
+
+	switch meta.MediaType {
+	case DockerV2Schema2MediaType, DockerV2ListMediaType: // A recognized type.
+		return meta.MediaType
+	}
+	// This is the only way the function can return DockerV2Schema1MediaType, and recognizing that is essential for stripping the JWS signatures = computing the correct manifest digest.
+	switch meta.SchemaVersion {
+	case 1:
+		if meta.Signatures != nil {
+			return DockerV2Schema1SignedMediaType
+		}
+		return DockerV2Schema1MediaType
+	case 2:
+		// Best effort to understand whether this is an OCI image, since mediaType
+		// is no longer present in the manifest for OCI; for docker v2s2,
+		// meta.MediaType should have been set. Given the data, this is our best guess.
+		ociMan := struct {
+			Config struct {
+				MediaType string `json:"mediaType"`
+			} `json:"config"`
+			Layers []imgspecv1.Descriptor `json:"layers"`
+		}{}
+		if err := json.Unmarshal(manifest, &ociMan); err != nil {
+			return ""
+		}
+		if ociMan.Config.MediaType == imgspecv1.MediaTypeImageConfig && len(ociMan.Layers) != 0 {
+			return imgspecv1.MediaTypeImageManifest
+		}
+		ociIndex := struct {
+			Manifests []imgspecv1.Descriptor `json:"manifests"`
+		}{}
+		if err := json.Unmarshal(manifest, &ociIndex); err != nil {
+			return ""
+		}
+		if len(ociIndex.Manifests) != 0 && ociIndex.Manifests[0].MediaType == imgspecv1.MediaTypeImageManifest {
+			return imgspecv1.MediaTypeImageIndex
+		}
+		return DockerV2Schema2MediaType
+	}
+	return ""
+}
+
+// Digest returns the digest of a docker manifest, with any necessary implied transformations like stripping v1s1 signatures.
+func Digest(manifest []byte) (digest.Digest, error) {
+	if GuessMIMEType(manifest) == DockerV2Schema1SignedMediaType {
+		sig, err := libtrust.ParsePrettySignature(manifest, "signatures")
+		if err != nil {
+			return "", err
+		}
+		manifest, err = sig.Payload()
+		if err != nil {
+			// Coverage: This should never happen, libtrust's Payload() can fail only if joseBase64UrlDecode() fails, on a string
+			// that libtrust itself has josebase64UrlEncode()d
+			return "", err
+		}
+	}
+
+	return digest.FromBytes(manifest), nil
+}
+
+// MatchesDigest returns true iff the manifest matches expectedDigest.
+// Error may be set if this returns false.
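+// (Editorial sketch, not upstream code: verifying a manifest fetched by digest.)
+//
+//	ok, err := MatchesDigest(manblob, expectedDigest)
+//	if err != nil || !ok {
+//		// reject the manifest
+//	}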
+// Note that this is not doing ConstantTimeCompare; by the time we get here, the cryptographic signature must already have been verified,
+// or we are not using a cryptographic channel and the attacker can modify the digest along with the manifest blob.
+func MatchesDigest(manifest []byte, expectedDigest digest.Digest) (bool, error) {
+	// This should eventually support various digest types.
+	actualDigest, err := Digest(manifest)
+	if err != nil {
+		return false, err
+	}
+	return expectedDigest == actualDigest, nil
+}
+
+// AddDummyV2S1Signature adds a JWS signature with a temporary key (i.e. useless) to a v2s1 manifest.
+// This is useful to make the manifest acceptable to a Docker Registry (even though nothing needs or wants the JWS signature).
+func AddDummyV2S1Signature(manifest []byte) ([]byte, error) {
+	key, err := libtrust.GenerateECP256PrivateKey()
+	if err != nil {
+		return nil, err // Coverage: This can fail only if rand.Reader fails.
+	}
+
+	js, err := libtrust.NewJSONSignature(manifest)
+	if err != nil {
+		return nil, err
+	}
+	if err := js.Sign(key); err != nil { // Coverage: This can fail basically only if rand.Reader fails.
+		return nil, err
+	}
+	return js.PrettySignature("signatures")
+}
+
+// MIMETypeIsMultiImage returns true if mimeType is a list of images
+func MIMETypeIsMultiImage(mimeType string) bool {
+	return mimeType == DockerV2ListMediaType
+}
+
+// NormalizedMIMEType returns the effective MIME type of a manifest MIME type returned by a server,
+// centralizing various workarounds.
+func NormalizedMIMEType(input string) string {
+	switch input {
+	// "application/json" is a valid v2s1 value per https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-1.md .
+	// This works for now, when nothing else seems to return "application/json"; if that were not true, the mapping/detection might
+	// need to happen within the ImageSource.
+	case "application/json":
+		return DockerV2Schema1SignedMediaType
+	case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType,
+		imgspecv1.MediaTypeImageManifest,
+		DockerV2Schema2MediaType,
+		DockerV2ListMediaType:
+		return input
+	default:
+		// If it's not a recognized manifest media type, or we have failed determining the type, we'll try one last time
+		// to deserialize using v2s1 as per https://github.com/docker/distribution/blob/master/manifests.go#L108
+		// and https://github.com/docker/distribution/blob/master/manifest/schema1/manifest.go#L50
+		//
+		// Crane registries can also return "text/plain", or pretty much anything else depending on a file extension “recognized” in the tag.
+		// This makes no real sense, but it happens because requests for manifests are redirected to a
+		// content distribution network which is configured that way. See https://bugzilla.redhat.com/show_bug.cgi?id=1389442
+		return DockerV2Schema1SignedMediaType
+	}
+}
+
+// FromBlob returns a Manifest instance for the specified manifest blob and the corresponding MIME type
+func FromBlob(manblob []byte, mt string) (Manifest, error) {
+	switch NormalizedMIMEType(mt) {
+	case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType:
+		return Schema1FromManifest(manblob)
+	case imgspecv1.MediaTypeImageManifest:
+		return OCI1FromManifest(manblob)
+	case DockerV2Schema2MediaType:
+		return Schema2FromManifest(manblob)
+	case DockerV2ListMediaType:
+		return nil, fmt.Errorf("Treating manifest lists as individual manifests is not implemented")
+	default: // Note that this may not be reachable, NormalizedMIMEType has a default for unknown values.
+		return nil, fmt.Errorf("Unimplemented manifest MIME type %s", mt)
+	}
+}
+
+// layerInfosToStrings converts a list of layer infos, presumably obtained from a Manifest.LayerInfos()
+// method call, into a format suitable for inclusion in a types.ImageInspectInfo structure.
+func layerInfosToStrings(infos []LayerInfo) []string {
+	layers := make([]string, len(infos))
+	for i, info := range infos {
+		layers[i] = info.Digest.String()
+	}
+	return layers
+}
diff --git a/vendor/github.com/containers/image/manifest/oci.go b/vendor/github.com/containers/image/manifest/oci.go
new file mode 100644
index 0000000..9170504
--- /dev/null
+++ b/vendor/github.com/containers/image/manifest/oci.go
@@ -0,0 +1,129 @@
+package manifest
+
+import (
+	"encoding/json"
+
+	"github.com/containers/image/types"
+	"github.com/opencontainers/go-digest"
+	"github.com/opencontainers/image-spec/specs-go"
+	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+)
+
+// BlobInfoFromOCI1Descriptor returns a types.BlobInfo based on the input OCI1 descriptor.
+func BlobInfoFromOCI1Descriptor(desc imgspecv1.Descriptor) types.BlobInfo {
+	return types.BlobInfo{
+		Digest:      desc.Digest,
+		Size:        desc.Size,
+		URLs:        desc.URLs,
+		Annotations: desc.Annotations,
+		MediaType:   desc.MediaType,
+	}
+}
+
+// OCI1 is a manifest.Manifest implementation for OCI images.
+// The underlying data from imgspecv1.Manifest is also available.
+type OCI1 struct {
+	imgspecv1.Manifest
+}
+
+// OCI1FromManifest creates an OCI1 manifest instance from a manifest blob.
+func OCI1FromManifest(manifest []byte) (*OCI1, error) {
+	oci1 := OCI1{}
+	if err := json.Unmarshal(manifest, &oci1); err != nil {
+		return nil, err
+	}
+	return &oci1, nil
+}
+
+// OCI1FromComponents creates an OCI1 manifest instance from the supplied data.
+func OCI1FromComponents(config imgspecv1.Descriptor, layers []imgspecv1.Descriptor) *OCI1 {
+	return &OCI1{
+		imgspecv1.Manifest{
+			Versioned: specs.Versioned{SchemaVersion: 2},
+			Config:    config,
+			Layers:    layers,
+		},
+	}
+}
+
+// OCI1Clone creates a copy of the supplied OCI1 manifest.
+func OCI1Clone(src *OCI1) *OCI1 {
+	return &OCI1{
+		Manifest: src.Manifest,
+	}
+}
+
+// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
+func (m *OCI1) ConfigInfo() types.BlobInfo {
+	return BlobInfoFromOCI1Descriptor(m.Config)
+}
+
+// LayerInfos returns a list of LayerInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
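+// (Editorial note:) duplicates arise when the same blob digest is listed for more
+// than one layer; each occurrence still represents a distinct application of that
+// layer when the filesystem tree is composed.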
+func (m *OCI1) LayerInfos() []LayerInfo { + blobs := []LayerInfo{} + for _, layer := range m.Layers { + blobs = append(blobs, LayerInfo{ + BlobInfo: BlobInfoFromOCI1Descriptor(layer), + EmptyLayer: false, + }) + } + return blobs +} + +// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers) +func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error { + if len(m.Layers) != len(layerInfos) { + return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.Layers), len(layerInfos)) + } + original := m.Layers + m.Layers = make([]imgspecv1.Descriptor, len(layerInfos)) + for i, info := range layerInfos { + m.Layers[i].MediaType = original[i].MediaType + m.Layers[i].Digest = info.Digest + m.Layers[i].Size = info.Size + m.Layers[i].Annotations = info.Annotations + m.Layers[i].URLs = info.URLs + } + return nil +} + +// Serialize returns the manifest in a blob format. +// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made! +func (m *OCI1) Serialize() ([]byte, error) { + return json.Marshal(*m) +} + +// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. +func (m *OCI1) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) { + config, err := configGetter(m.ConfigInfo()) + if err != nil { + return nil, err + } + v1 := &imgspecv1.Image{} + if err := json.Unmarshal(config, v1); err != nil { + return nil, err + } + d1 := &Schema2V1Image{} + json.Unmarshal(config, d1) + i := &types.ImageInspectInfo{ + Tag: "", + Created: v1.Created, + DockerVersion: d1.DockerVersion, + Labels: v1.Config.Labels, + Architecture: v1.Architecture, + Os: v1.OS, + Layers: layerInfosToStrings(m.LayerInfos()), + } + return i, nil +} + +// ImageID computes an ID which can uniquely identify this image by its contents. +func (m *OCI1) ImageID([]digest.Digest) (string, error) { + if err := m.Config.Digest.Validate(); err != nil { + return "", err + } + return m.Config.Digest.Hex(), nil +} diff --git a/vendor/github.com/containers/image/oci/archive/oci_dest.go b/vendor/github.com/containers/image/oci/archive/oci_dest.go new file mode 100644 index 0000000..9571c37 --- /dev/null +++ b/vendor/github.com/containers/image/oci/archive/oci_dest.go @@ -0,0 +1,151 @@ +package archive + +import ( + "context" + "io" + "os" + + "github.com/containers/image/types" + "github.com/containers/storage/pkg/archive" + "github.com/pkg/errors" +) + +type ociArchiveImageDestination struct { + ref ociArchiveReference + unpackedDest types.ImageDestination + tempDirRef tempDirOCIRef +} + +// newImageDestination returns an ImageDestination for writing to an existing directory. 
+func newImageDestination(ctx context.Context, sys *types.SystemContext, ref ociArchiveReference) (types.ImageDestination, error) { + tempDirRef, err := createOCIRef(ref.image) + if err != nil { + return nil, errors.Wrapf(err, "error creating oci reference") + } + unpackedDest, err := tempDirRef.ociRefExtracted.NewImageDestination(ctx, sys) + if err != nil { + if err := tempDirRef.deleteTempDir(); err != nil { + return nil, errors.Wrapf(err, "error deleting temp directory %q", tempDirRef.tempDirectory) + } + return nil, err + } + return &ociArchiveImageDestination{ref: ref, + unpackedDest: unpackedDest, + tempDirRef: tempDirRef}, nil +} + +// Reference returns the reference used to set up this destination. +func (d *ociArchiveImageDestination) Reference() types.ImageReference { + return d.ref +} + +// Close removes resources associated with an initialized ImageDestination, if any +// Close deletes the temp directory of the oci-archive image +func (d *ociArchiveImageDestination) Close() error { + defer d.tempDirRef.deleteTempDir() + return d.unpackedDest.Close() +} + +func (d *ociArchiveImageDestination) SupportedManifestMIMETypes() []string { + return d.unpackedDest.SupportedManifestMIMETypes() +} + +// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures +func (d *ociArchiveImageDestination) SupportsSignatures(ctx context.Context) error { + return d.unpackedDest.SupportsSignatures(ctx) +} + +func (d *ociArchiveImageDestination) DesiredLayerCompression() types.LayerCompression { + return d.unpackedDest.DesiredLayerCompression() +} + +// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually +// uploaded to the image destination, true otherwise. +func (d *ociArchiveImageDestination) AcceptsForeignLayerURLs() bool { + return d.unpackedDest.AcceptsForeignLayerURLs() +} + +// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise +func (d *ociArchiveImageDestination) MustMatchRuntimeOS() bool { + return d.unpackedDest.MustMatchRuntimeOS() +} + +// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(), +// and would prefer to receive an unmodified manifest instead of one modified for the destination. +// Does not make a difference if Reference().DockerReference() is nil. +func (d *ociArchiveImageDestination) IgnoresEmbeddedDockerReference() bool { + return d.unpackedDest.IgnoresEmbeddedDockerReference() +} + +// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. +func (d *ociArchiveImageDestination) HasThreadSafePutBlob() bool { + return false +} + +// PutBlob writes contents of stream and returns data representing the result. +// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. +// inputInfo.Size is the expected length of stream, if known. +// inputInfo.MediaType describes the blob format, if known. +// May update cache. +// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available +// to any other readers for download using the supplied digest. +// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. 
+func (d *ociArchiveImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
+	return d.unpackedDest.PutBlob(ctx, stream, inputInfo, cache, isConfig)
+}
+
+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
+func (d *ociArchiveImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
+	return d.unpackedDest.TryReusingBlob(ctx, info, cache, canSubstitute)
+}
+
+// PutManifest writes manifest to the destination
+func (d *ociArchiveImageDestination) PutManifest(ctx context.Context, m []byte) error {
+	return d.unpackedDest.PutManifest(ctx, m)
+}
+
+func (d *ociArchiveImageDestination) PutSignatures(ctx context.Context, signatures [][]byte) error {
+	return d.unpackedDest.PutSignatures(ctx, signatures)
+}
+
+// Commit marks the process of storing the image as successful and asks for the image to be persisted.
+// After the directory is made, it is tarred up into a file and the directory is deleted.
+func (d *ociArchiveImageDestination) Commit(ctx context.Context) error {
+	if err := d.unpackedDest.Commit(ctx); err != nil {
+		return errors.Wrapf(err, "error storing image %q", d.ref.image)
+	}
+
+	// path of directory to tar up
+	src := d.tempDirRef.tempDirectory
+	// path to save tarred up file
+	dst := d.ref.resolvedFile
+	return tarDirectory(src, dst)
+}
+
+// tarDirectory archives the directory at src and saves it to dst
+func tarDirectory(src, dst string) error {
+	// input is a stream of bytes from the archive of the directory at path
+	input, err := archive.Tar(src, archive.Uncompressed)
+	if err != nil {
+		return errors.Wrapf(err, "error retrieving stream of bytes from %q", src)
+	}
+
+	// creates the tar file
+	outFile, err := os.Create(dst)
+	if err != nil {
+		return errors.Wrapf(err, "error creating tar file %q", dst)
+	}
+	defer outFile.Close()
+
+	// copies the contents of the directory to the tar file
+	// TODO: This can take quite some time, and should ideally be cancellable using a context.Context.
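+	// (Editorial note:) archive.Tar returns a lazily-written stream, so read errors
+	// from the source directory may only surface during the copy below.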
+ _, err = io.Copy(outFile, input) + + return err +} diff --git a/vendor/github.com/containers/image/oci/archive/oci_src.go b/vendor/github.com/containers/image/oci/archive/oci_src.go new file mode 100644 index 0000000..ca74f95 --- /dev/null +++ b/vendor/github.com/containers/image/oci/archive/oci_src.go @@ -0,0 +1,102 @@ +package archive + +import ( + "context" + "io" + + ocilayout "github.com/containers/image/oci/layout" + "github.com/containers/image/types" + digest "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +type ociArchiveImageSource struct { + ref ociArchiveReference + unpackedSrc types.ImageSource + tempDirRef tempDirOCIRef +} + +// newImageSource returns an ImageSource for reading from an existing directory. +// newImageSource untars the file and saves it in a temp directory +func newImageSource(ctx context.Context, sys *types.SystemContext, ref ociArchiveReference) (types.ImageSource, error) { + tempDirRef, err := createUntarTempDir(ref) + if err != nil { + return nil, errors.Wrap(err, "error creating temp directory") + } + + unpackedSrc, err := tempDirRef.ociRefExtracted.NewImageSource(ctx, sys) + if err != nil { + if err := tempDirRef.deleteTempDir(); err != nil { + return nil, errors.Wrapf(err, "error deleting temp directory %q", tempDirRef.tempDirectory) + } + return nil, err + } + return &ociArchiveImageSource{ref: ref, + unpackedSrc: unpackedSrc, + tempDirRef: tempDirRef}, nil +} + +// LoadManifestDescriptor loads the manifest +func LoadManifestDescriptor(imgRef types.ImageReference) (imgspecv1.Descriptor, error) { + ociArchRef, ok := imgRef.(ociArchiveReference) + if !ok { + return imgspecv1.Descriptor{}, errors.Errorf("error typecasting, need type ociArchiveReference") + } + tempDirRef, err := createUntarTempDir(ociArchRef) + if err != nil { + return imgspecv1.Descriptor{}, errors.Wrap(err, "error creating temp directory") + } + defer tempDirRef.deleteTempDir() + + descriptor, err := ocilayout.LoadManifestDescriptor(tempDirRef.ociRefExtracted) + if err != nil { + return imgspecv1.Descriptor{}, errors.Wrap(err, "error loading index") + } + return descriptor, nil +} + +// Reference returns the reference used to set up this source. +func (s *ociArchiveImageSource) Reference() types.ImageReference { + return s.ref +} + +// Close removes resources associated with an initialized ImageSource, if any. +// Close deletes the temporary directory at dst +func (s *ociArchiveImageSource) Close() error { + defer s.tempDirRef.deleteTempDir() + return s.unpackedSrc.Close() +} + +// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). +// It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); +// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). +func (s *ociArchiveImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { + return s.unpackedSrc.GetManifest(ctx, instanceDigest) +} + +// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. +func (s *ociArchiveImageSource) HasThreadSafeGetBlob() bool { + return false +} + +// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). 
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (s *ociArchiveImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
+	return s.unpackedSrc.GetBlob(ctx, info, cache)
+}
+
+// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+func (s *ociArchiveImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
+	return s.unpackedSrc.GetSignatures(ctx, instanceDigest)
+}
+
+// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified.
+func (s *ociArchiveImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) {
+	return nil, nil
+}
diff --git a/vendor/github.com/containers/image/oci/archive/oci_transport.go b/vendor/github.com/containers/image/oci/archive/oci_transport.go
new file mode 100644
index 0000000..7c1d26b
--- /dev/null
+++ b/vendor/github.com/containers/image/oci/archive/oci_transport.go
@@ -0,0 +1,192 @@
+package archive
+
+import (
+	"context"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"strings"
+
+	"github.com/containers/image/directory/explicitfilepath"
+	"github.com/containers/image/docker/reference"
+	"github.com/containers/image/image"
+	"github.com/containers/image/internal/tmpdir"
+	"github.com/containers/image/oci/internal"
+	ocilayout "github.com/containers/image/oci/layout"
+	"github.com/containers/image/transports"
+	"github.com/containers/image/types"
+	"github.com/containers/storage/pkg/archive"
+	"github.com/pkg/errors"
+)
+
+func init() {
+	transports.Register(Transport)
+}
+
+// Transport is an ImageTransport for OCI archives:
+// it creates an oci-archive tar file by calling into the OCI transport,
+// tarring the directory created by oci and deleting the directory.
+var Transport = ociArchiveTransport{}
+
+type ociArchiveTransport struct{}
+
+// ociArchiveReference is an ImageReference for OCI Archive paths
+type ociArchiveReference struct {
+	file         string
+	resolvedFile string
+	image        string
+}
+
+func (t ociArchiveTransport) Name() string {
+	return "oci-archive"
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix,
+// into an ImageReference.
+func (t ociArchiveTransport) ParseReference(reference string) (types.ImageReference, error) {
+	return ParseReference(reference)
+}
+
+// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes key
+func (t ociArchiveTransport) ValidatePolicyConfigurationScope(scope string) error {
+	return internal.ValidateScope(scope)
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an OCI ImageReference.
+func ParseReference(reference string) (types.ImageReference, error) {
+	file, image := internal.SplitPathAndImage(reference)
+	return NewReference(file, image)
+}
+
+// NewReference returns an OCI reference for a file and an image.
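+// (Editorial sketch: the path below is hypothetical.)
+//
+//	ref, err := NewReference("/tmp/busybox-oci.tar", "latest")
+//	// equivalent to ParseReference("/tmp/busybox-oci.tar:latest")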
+func NewReference(file, image string) (types.ImageReference, error) { + resolved, err := explicitfilepath.ResolvePathToFullyExplicit(file) + if err != nil { + return nil, err + } + + if err := internal.ValidateOCIPath(file); err != nil { + return nil, err + } + + if err := internal.ValidateImageName(image); err != nil { + return nil, err + } + + return ociArchiveReference{file: file, resolvedFile: resolved, image: image}, nil +} + +func (ref ociArchiveReference) Transport() types.ImageTransport { + return Transport +} + +// StringWithinTransport returns a string representation of the reference, which MUST be such that +// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. +func (ref ociArchiveReference) StringWithinTransport() string { + return fmt.Sprintf("%s:%s", ref.file, ref.image) +} + +// DockerReference returns a Docker reference associated with this reference +func (ref ociArchiveReference) DockerReference() reference.Named { + return nil +} + +// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. +func (ref ociArchiveReference) PolicyConfigurationIdentity() string { + // NOTE: ref.image is not a part of the image identity, because "$dir:$someimage" and "$dir:" may mean the + // same image and the two can’t be statically disambiguated. Using at least the repository directory is + // less granular but hopefully still useful. + return fmt.Sprintf("%s", ref.resolvedFile) +} + +// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search +// for if explicit configuration for PolicyConfigurationIdentity() is not set +func (ref ociArchiveReference) PolicyConfigurationNamespaces() []string { + res := []string{} + path := ref.resolvedFile + for { + lastSlash := strings.LastIndex(path, "/") + // Note that we do not include "/"; it is redundant with the default "" global default, + // and rejected by ociTransport.ValidatePolicyConfigurationScope above. + if lastSlash == -1 || path == "/" { + break + } + res = append(res, path) + path = path[:lastSlash] + } + return res +} + +// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. +// The caller must call .Close() on the returned ImageCloser. +// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, +// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. +// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. +func (ref ociArchiveReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { + src, err := newImageSource(ctx, sys, ref) + if err != nil { + return nil, err + } + return image.FromSource(ctx, sys, src) +} + +// NewImageSource returns a types.ImageSource for this reference. +// The caller must call .Close() on the returned ImageSource. +func (ref ociArchiveReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { + return newImageSource(ctx, sys, ref) +} + +// NewImageDestination returns a types.ImageDestination for this reference. +// The caller must call .Close() on the returned ImageDestination. 
+func (ref ociArchiveReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
+	return newImageDestination(ctx, sys, ref)
+}
+
+// DeleteImage deletes the named image from the registry, if supported.
+func (ref ociArchiveReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
+	return errors.Errorf("Deleting images not implemented for oci-archive: images")
+}
+
+// tempDirOCIRef stores the ociReference and temporary directory returned by createOCIRef.
+type tempDirOCIRef struct {
+	tempDirectory   string
+	ociRefExtracted types.ImageReference
+}
+
+// deleteTempDir deletes the temporary directory created for the extracted image.
+func (t *tempDirOCIRef) deleteTempDir() error {
+	return os.RemoveAll(t.tempDirectory)
+}
+
+// createOCIRef creates the oci reference of the image
+func createOCIRef(image string) (tempDirOCIRef, error) {
+	dir, err := ioutil.TempDir(tmpdir.TemporaryDirectoryForBigFiles(), "oci")
+	if err != nil {
+		return tempDirOCIRef{}, errors.Wrapf(err, "error creating temp directory")
+	}
+	ociRef, err := ocilayout.NewReference(dir, image)
+	if err != nil {
+		return tempDirOCIRef{}, err
+	}
+
+	tempDirRef := tempDirOCIRef{tempDirectory: dir, ociRefExtracted: ociRef}
+	return tempDirRef, nil
+}
+
+// createUntarTempDir creates the temporary directory and copies the tarred content to it.
+func createUntarTempDir(ref ociArchiveReference) (tempDirOCIRef, error) {
+	tempDirRef, err := createOCIRef(ref.image)
+	if err != nil {
+		return tempDirOCIRef{}, errors.Wrap(err, "error creating oci reference")
+	}
+	src := ref.resolvedFile
+	dst := tempDirRef.tempDirectory
+	// TODO: This can take quite some time, and should ideally be cancellable using a context.Context.
+	if err := archive.UntarPath(src, dst); err != nil {
+		if err := tempDirRef.deleteTempDir(); err != nil {
+			return tempDirOCIRef{}, errors.Wrapf(err, "error deleting temp directory %q", tempDirRef.tempDirectory)
+		}
+		return tempDirOCIRef{}, errors.Wrapf(err, "error untarring file %q", tempDirRef.tempDirectory)
+	}
+	return tempDirRef, nil
+}
diff --git a/vendor/github.com/containers/image/oci/internal/oci_util.go b/vendor/github.com/containers/image/oci/internal/oci_util.go
new file mode 100644
index 0000000..c2012e5
--- /dev/null
+++ b/vendor/github.com/containers/image/oci/internal/oci_util.go
@@ -0,0 +1,126 @@
+package internal
+
+import (
+	"github.com/pkg/errors"
+	"path/filepath"
+	"regexp"
+	"runtime"
+	"strings"
+)
+
+// annotation spec from https://github.com/opencontainers/image-spec/blob/master/annotations.md#pre-defined-annotation-keys
+const (
+	separator = `(?:[-._:@+]|--)`
+	alphanum  = `(?:[A-Za-z0-9]+)`
+	component = `(?:` + alphanum + `(?:` + separator + alphanum + `)*)`
+)
+
+var refRegexp = regexp.MustCompile(`^` + component + `(?:/` + component + `)*$`)
+var windowsRefRegexp = regexp.MustCompile(`^([a-zA-Z]:\\.+?):(.*)$`)
+
+// ValidateImageName returns nil if the image name is empty or matches the open-containers image name specs.
+// In any other case an error is returned.
+func ValidateImageName(image string) error {
+	if len(image) == 0 {
+		return nil
+	}
+
+	var err error
+	if !refRegexp.MatchString(image) {
+		err = errors.Errorf("Invalid image %s", image)
+	}
+	return err
+}
+
+// SplitPathAndImage tries to split the provided OCI reference into the OCI path and image.
+// Neither path nor image parts are validated at this stage.
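+// (Editorial examples of the split; paths are hypothetical:)
+//
+//	SplitPathAndImage("/var/lib/layout:v1") // "/var/lib/layout", "v1"
+//	SplitPathAndImage("/var/lib/layout")    // "/var/lib/layout", ""
+//	SplitPathAndImage(`C:\images\oci:v1`)   // on Windows: `C:\images\oci`, "v1"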
+func SplitPathAndImage(reference string) (string, string) { + if runtime.GOOS == "windows" { + return splitPathAndImageWindows(reference) + } + return splitPathAndImageNonWindows(reference) +} + +func splitPathAndImageWindows(reference string) (string, string) { + groups := windowsRefRegexp.FindStringSubmatch(reference) + // nil group means no match + if groups == nil { + return reference, "" + } + + // we expect three elements. First one full match, second the capture group for the path and + // the third the capture group for the image + if len(groups) != 3 { + return reference, "" + } + return groups[1], groups[2] +} + +func splitPathAndImageNonWindows(reference string) (string, string) { + sep := strings.SplitN(reference, ":", 2) + path := sep[0] + + var image string + if len(sep) == 2 { + image = sep[1] + } + return path, image +} + +// ValidateOCIPath takes the OCI path and validates it. +func ValidateOCIPath(path string) error { + if runtime.GOOS == "windows" { + // On Windows we must allow for a ':' as part of the path + if strings.Count(path, ":") > 1 { + return errors.Errorf("Invalid OCI reference: path %s contains more than one colon", path) + } + } else { + if strings.Contains(path, ":") { + return errors.Errorf("Invalid OCI reference: path %s contains a colon", path) + } + } + return nil +} + +// ValidateScope validates a policy configuration scope for an OCI transport. +func ValidateScope(scope string) error { + var err error + if runtime.GOOS == "windows" { + err = validateScopeWindows(scope) + } else { + err = validateScopeNonWindows(scope) + } + if err != nil { + return err + } + + cleaned := filepath.Clean(scope) + if cleaned != scope { + return errors.Errorf(`Invalid scope %s: Uses non-canonical path format, perhaps try with path %s`, scope, cleaned) + } + + return nil +} + +func validateScopeWindows(scope string) error { + matched, _ := regexp.Match(`^[a-zA-Z]:\\`, []byte(scope)) + if !matched { + return errors.Errorf("Invalid scope '%s'. Must be an absolute path", scope) + } + + return nil +} + +func validateScopeNonWindows(scope string) error { + if !strings.HasPrefix(scope, "/") { + return errors.Errorf("Invalid scope %s: must be an absolute path", scope) + } + + // Refuse also "/", otherwise "/" and "" would have the same semantics, + // and "" could be unexpectedly shadowed by the "/" entry. + if scope == "/" { + return errors.New(`Invalid scope "/": Use the generic default scope ""`) + } + + return nil +} diff --git a/vendor/github.com/containers/image/oci/layout/oci_dest.go b/vendor/github.com/containers/image/oci/layout/oci_dest.go new file mode 100644 index 0000000..db10218 --- /dev/null +++ b/vendor/github.com/containers/image/oci/layout/oci_dest.go @@ -0,0 +1,306 @@ +package layout + +import ( + "context" + "encoding/json" + "io" + "io/ioutil" + "os" + "path/filepath" + "runtime" + + "github.com/containers/image/manifest" + "github.com/containers/image/types" + digest "github.com/opencontainers/go-digest" + imgspec "github.com/opencontainers/image-spec/specs-go" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +type ociImageDestination struct { + ref ociReference + index imgspecv1.Index + sharedBlobDir string + acceptUncompressedLayers bool +} + +// newImageDestination returns an ImageDestination for writing to an existing directory. 
+func newImageDestination(sys *types.SystemContext, ref ociReference) (types.ImageDestination, error) { + var index *imgspecv1.Index + if indexExists(ref) { + var err error + index, err = ref.getIndex() + if err != nil { + return nil, err + } + } else { + index = &imgspecv1.Index{ + Versioned: imgspec.Versioned{ + SchemaVersion: 2, + }, + } + } + + d := &ociImageDestination{ref: ref, index: *index} + if sys != nil { + d.sharedBlobDir = sys.OCISharedBlobDirPath + d.acceptUncompressedLayers = sys.OCIAcceptUncompressedLayers + } + + if err := ensureDirectoryExists(d.ref.dir); err != nil { + return nil, err + } + // Per the OCI image specification, layouts MUST have a "blobs" subdirectory, + // but it MAY be empty (e.g. if we never end up calling PutBlob) + // https://github.com/opencontainers/image-spec/blame/7c889fafd04a893f5c5f50b7ab9963d5d64e5242/image-layout.md#L19 + if err := ensureDirectoryExists(filepath.Join(d.ref.dir, "blobs")); err != nil { + return nil, err + } + return d, nil +} + +// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, +// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects. +func (d *ociImageDestination) Reference() types.ImageReference { + return d.ref +} + +// Close removes resources associated with an initialized ImageDestination, if any. +func (d *ociImageDestination) Close() error { + return nil +} + +func (d *ociImageDestination) SupportedManifestMIMETypes() []string { + return []string{ + imgspecv1.MediaTypeImageManifest, + } +} + +// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures. +// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil. +func (d *ociImageDestination) SupportsSignatures(ctx context.Context) error { + return errors.Errorf("Pushing signatures for OCI images is not supported") +} + +func (d *ociImageDestination) DesiredLayerCompression() types.LayerCompression { + if d.acceptUncompressedLayers { + return types.PreserveOriginal + } + return types.Compress +} + +// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually +// uploaded to the image destination, true otherwise. +func (d *ociImageDestination) AcceptsForeignLayerURLs() bool { + return true +} + +// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise. +func (d *ociImageDestination) MustMatchRuntimeOS() bool { + return false +} + +// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(), +// and would prefer to receive an unmodified manifest instead of one modified for the destination. +// Does not make a difference if Reference().DockerReference() is nil. +func (d *ociImageDestination) IgnoresEmbeddedDockerReference() bool { + return false // N/A, DockerReference() returns nil. +} + +// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. +func (d *ociImageDestination) HasThreadSafePutBlob() bool { + return false +} + +// PutBlob writes contents of stream and returns data representing the result. +// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. +// inputInfo.Size is the expected length of stream, if known. +// inputInfo.MediaType describes the blob format, if known. +// May update cache. 
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+func (d *ociImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
+	blobFile, err := ioutil.TempFile(d.ref.dir, "oci-put-blob")
+	if err != nil {
+		return types.BlobInfo{}, err
+	}
+	succeeded := false
+	explicitClosed := false
+	defer func() {
+		if !explicitClosed {
+			blobFile.Close()
+		}
+		if !succeeded {
+			os.Remove(blobFile.Name())
+		}
+	}()
+
+	digester := digest.Canonical.Digester()
+	tee := io.TeeReader(stream, digester.Hash())
+
+	// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
+	size, err := io.Copy(blobFile, tee)
+	if err != nil {
+		return types.BlobInfo{}, err
+	}
+	computedDigest := digester.Digest()
+	if inputInfo.Size != -1 && size != inputInfo.Size {
+		return types.BlobInfo{}, errors.Errorf("Size mismatch when copying %s, expected %d, got %d", computedDigest, inputInfo.Size, size)
+	}
+	if err := blobFile.Sync(); err != nil {
+		return types.BlobInfo{}, err
+	}
+
+	// On POSIX systems, blobFile was created with mode 0600, so we need to make it readable.
+	// On Windows, the “permissions of newly created files” argument to syscall.Open is
+	// ignored and the file is already readable; besides, blobFile.Chmod, i.e. syscall.Fchmod,
+	// always fails on Windows.
+	if runtime.GOOS != "windows" {
+		if err := blobFile.Chmod(0644); err != nil {
+			return types.BlobInfo{}, err
+		}
+	}
+
+	blobPath, err := d.ref.blobPath(computedDigest, d.sharedBlobDir)
+	if err != nil {
+		return types.BlobInfo{}, err
+	}
+	if err := ensureParentDirectoryExists(blobPath); err != nil {
+		return types.BlobInfo{}, err
+	}
+
+	// We need to explicitly close the file, since a rename otherwise won't work on Windows.
+	blobFile.Close()
+	explicitClosed = true
+	if err := os.Rename(blobFile.Name(), blobPath); err != nil {
+		return types.BlobInfo{}, err
+	}
+	succeeded = true
+	return types.BlobInfo{Digest: computedDigest, Size: size}, nil
+}
+
+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
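+// (Editorial sketch, not upstream code: the typical calling pattern.)
+//
+//	reused, info, err := d.TryReusingBlob(ctx, blobInfo, cache, true)
+//	if err == nil && reused {
+//		// the blob already exists in the layout; PutBlob can be skipped
+//		_ = info
+//	}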
+func (d *ociImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { + if info.Digest == "" { + return false, types.BlobInfo{}, errors.Errorf(`"Can not check for a blob with unknown digest`) + } + blobPath, err := d.ref.blobPath(info.Digest, d.sharedBlobDir) + if err != nil { + return false, types.BlobInfo{}, err + } + finfo, err := os.Stat(blobPath) + if err != nil && os.IsNotExist(err) { + return false, types.BlobInfo{}, nil + } + if err != nil { + return false, types.BlobInfo{}, err + } + return true, types.BlobInfo{Digest: info.Digest, Size: finfo.Size()}, nil +} + +// PutManifest writes manifest to the destination. +// FIXME? This should also receive a MIME type if known, to differentiate between schema versions. +// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema), +// but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError. +func (d *ociImageDestination) PutManifest(ctx context.Context, m []byte) error { + digest, err := manifest.Digest(m) + if err != nil { + return err + } + desc := imgspecv1.Descriptor{} + desc.Digest = digest + // TODO(runcom): beaware and add support for OCI manifest list + desc.MediaType = imgspecv1.MediaTypeImageManifest + desc.Size = int64(len(m)) + + blobPath, err := d.ref.blobPath(digest, d.sharedBlobDir) + if err != nil { + return err + } + if err := ensureParentDirectoryExists(blobPath); err != nil { + return err + } + if err := ioutil.WriteFile(blobPath, m, 0644); err != nil { + return err + } + + if d.ref.image != "" { + annotations := make(map[string]string) + annotations["org.opencontainers.image.ref.name"] = d.ref.image + desc.Annotations = annotations + } + desc.Platform = &imgspecv1.Platform{ + Architecture: runtime.GOARCH, + OS: runtime.GOOS, + } + d.addManifest(&desc) + + return nil +} + +func (d *ociImageDestination) addManifest(desc *imgspecv1.Descriptor) { + for i, manifest := range d.index.Manifests { + if manifest.Annotations["org.opencontainers.image.ref.name"] == desc.Annotations["org.opencontainers.image.ref.name"] { + // TODO Should there first be a cleanup based on the descriptor we are going to replace? + d.index.Manifests[i] = *desc + return + } + } + d.index.Manifests = append(d.index.Manifests, *desc) +} + +func (d *ociImageDestination) PutSignatures(ctx context.Context, signatures [][]byte) error { + if len(signatures) != 0 { + return errors.Errorf("Pushing signatures for OCI images is not supported") + } + return nil +} + +// Commit marks the process of storing the image as successful and asks for the image to be persisted. +// WARNING: This does not have any transactional semantics: +// - Uploaded data MAY be visible to others before Commit() is called +// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. 
rollback is allowed but not guaranteed)
+func (d *ociImageDestination) Commit(ctx context.Context) error {
+	if err := ioutil.WriteFile(d.ref.ociLayoutPath(), []byte(`{"imageLayoutVersion": "1.0.0"}`), 0644); err != nil {
+		return err
+	}
+	indexJSON, err := json.Marshal(d.index)
+	if err != nil {
+		return err
+	}
+	return ioutil.WriteFile(d.ref.indexPath(), indexJSON, 0644)
+}
+
+func ensureDirectoryExists(path string) error {
+	if _, err := os.Stat(path); err != nil && os.IsNotExist(err) {
+		if err := os.MkdirAll(path, 0755); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// ensureParentDirectoryExists ensures the parent of the supplied path exists.
+func ensureParentDirectoryExists(path string) error {
+	return ensureDirectoryExists(filepath.Dir(path))
+}
+
+// indexExists checks whether the index location specified in the OCI reference exists.
+// The implementation is opinionated, since in case of unexpected errors true is returned.
+func indexExists(ref ociReference) bool {
+	_, err := os.Stat(ref.indexPath())
+	if err == nil {
+		return true
+	}
+	if os.IsNotExist(err) {
+		return false
+	}
+	return true
+}
diff --git a/vendor/github.com/containers/image/oci/layout/oci_src.go b/vendor/github.com/containers/image/oci/layout/oci_src.go
new file mode 100644
index 0000000..cc536f6
--- /dev/null
+++ b/vendor/github.com/containers/image/oci/layout/oci_src.go
@@ -0,0 +1,171 @@
+package layout
+
+import (
+	"context"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"os"
+	"strconv"
+
+	"github.com/containers/image/pkg/tlsclientconfig"
+	"github.com/containers/image/types"
+	"github.com/docker/go-connections/tlsconfig"
+	"github.com/opencontainers/go-digest"
+	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+)
+
+type ociImageSource struct {
+	ref           ociReference
+	descriptor    imgspecv1.Descriptor
+	client        *http.Client
+	sharedBlobDir string
+}
+
+// newImageSource returns an ImageSource for reading from an existing directory.
+func newImageSource(sys *types.SystemContext, ref ociReference) (types.ImageSource, error) {
+	tr := tlsclientconfig.NewTransport()
+	tr.TLSClientConfig = tlsconfig.ServerDefault()
+
+	if sys != nil && sys.OCICertPath != "" {
+		if err := tlsclientconfig.SetupCertificates(sys.OCICertPath, tr.TLSClientConfig); err != nil {
+			return nil, err
+		}
+		tr.TLSClientConfig.InsecureSkipVerify = sys.OCIInsecureSkipTLSVerify
+	}
+
+	client := &http.Client{}
+	client.Transport = tr
+	descriptor, err := ref.getManifestDescriptor()
+	if err != nil {
+		return nil, err
+	}
+	d := &ociImageSource{ref: ref, descriptor: descriptor, client: client}
+	if sys != nil {
+		// TODO(jonboulle): check dir existence?
+		d.sharedBlobDir = sys.OCISharedBlobDirPath
+	}
+	return d, nil
+}
+
+// Reference returns the reference used to set up this source.
+func (s *ociImageSource) Reference() types.ImageReference {
+	return s.ref
+}
+
+// Close removes resources associated with an initialized ImageSource, if any.
+func (s *ociImageSource) Close() error {
+	return nil
+}
+
+// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
+// It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
+// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
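An aside before the GetManifest implementation: Commit (above) persists only two small files next to the blobs directory, the oci-layout version marker and the serialized index. A standalone sketch of that on-disk skeleton, using a scratch directory and a hand-written empty index for brevity:

package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
)

func main() {
	dir, err := ioutil.TempDir("", "oci-layout-")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	// oci-layout: the version marker Commit writes verbatim.
	layout := []byte(`{"imageLayoutVersion": "1.0.0"}`)
	if err := ioutil.WriteFile(filepath.Join(dir, "oci-layout"), layout, 0644); err != nil {
		panic(err)
	}
	// index.json: the serialized imgspecv1.Index; empty here for brevity.
	index := []byte(`{"schemaVersion": 2, "manifests": []}`)
	if err := ioutil.WriteFile(filepath.Join(dir, "index.json"), index, 0644); err != nil {
		panic(err)
	}
	fmt.Println("wrote layout skeleton under", dir)
}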
+func (s *ociImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { + var dig digest.Digest + var mimeType string + if instanceDigest == nil { + dig = digest.Digest(s.descriptor.Digest) + mimeType = s.descriptor.MediaType + } else { + dig = *instanceDigest + // XXX: instanceDigest means that we don't immediately have the context of what + // mediaType the manifest has. In OCI this means that we don't know + // what reference it came from, so we just *assume* that its + // MediaTypeImageManifest. + // FIXME: We should actually be able to look up the manifest in the index, + // and see the MIME type there. + mimeType = imgspecv1.MediaTypeImageManifest + } + + manifestPath, err := s.ref.blobPath(dig, s.sharedBlobDir) + if err != nil { + return nil, "", err + } + m, err := ioutil.ReadFile(manifestPath) + if err != nil { + return nil, "", err + } + + return m, mimeType, nil +} + +// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. +func (s *ociImageSource) HasThreadSafeGetBlob() bool { + return false +} + +// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). +// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. +// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. +func (s *ociImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { + if len(info.URLs) != 0 { + return s.getExternalBlob(ctx, info.URLs) + } + + path, err := s.ref.blobPath(info.Digest, s.sharedBlobDir) + if err != nil { + return nil, 0, err + } + + r, err := os.Open(path) + if err != nil { + return nil, 0, err + } + fi, err := r.Stat() + if err != nil { + return nil, 0, err + } + return r, fi.Size(), nil +} + +// GetSignatures returns the image's signatures. It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). +func (s *ociImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { + return [][]byte{}, nil +} + +func (s *ociImageSource) getExternalBlob(ctx context.Context, urls []string) (io.ReadCloser, int64, error) { + errWrap := errors.New("failed fetching external blob from all urls") + for _, url := range urls { + + req, err := http.NewRequest("GET", url, nil) + if err != nil { + errWrap = errors.Wrapf(errWrap, "fetching %s failed %s", url, err.Error()) + continue + } + + resp, err := s.client.Do(req.WithContext(ctx)) + if err != nil { + errWrap = errors.Wrapf(errWrap, "fetching %s failed %s", url, err.Error()) + continue + } + + if resp.StatusCode != http.StatusOK { + resp.Body.Close() + errWrap = errors.Wrapf(errWrap, "fetching %s failed, response code not 200", url) + continue + } + + return resp.Body, getBlobSize(resp), nil + } + + return nil, 0, errWrap +} + +// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified. 
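The blob lookup in GetBlob above resolves a digest to a path on disk. A standalone sketch of that blobs/<algorithm>/<hex> convention; blobPathFor is illustrative only, the vendored equivalent is ociReference.blobPath, defined later in this diff:

package main

import (
	"fmt"
	"path/filepath"

	"github.com/opencontainers/go-digest"
)

// blobPathFor maps a digest to <dir>/blobs/<algorithm>/<hex>, or into a
// shared blob directory when one is configured.
func blobPathFor(dir string, d digest.Digest, sharedBlobDir string) (string, error) {
	if err := d.Validate(); err != nil {
		return "", err
	}
	blobDir := filepath.Join(dir, "blobs")
	if sharedBlobDir != "" {
		blobDir = sharedBlobDir
	}
	return filepath.Join(blobDir, d.Algorithm().String(), d.Hex()), nil
}

func main() {
	d := digest.FromString("hello blob")
	p, err := blobPathFor("/tmp/layout", d, "")
	if err != nil {
		panic(err)
	}
	fmt.Println(p) // /tmp/layout/blobs/sha256/<hex>
}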
+func (s *ociImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) { + return nil, nil +} + +func getBlobSize(resp *http.Response) int64 { + size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64) + if err != nil { + size = -1 + } + return size +} diff --git a/vendor/github.com/containers/image/oci/layout/oci_transport.go b/vendor/github.com/containers/image/oci/layout/oci_transport.go new file mode 100644 index 0000000..4e5cecf --- /dev/null +++ b/vendor/github.com/containers/image/oci/layout/oci_transport.go @@ -0,0 +1,264 @@ +package layout + +import ( + "context" + "encoding/json" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/containers/image/directory/explicitfilepath" + "github.com/containers/image/docker/reference" + "github.com/containers/image/image" + "github.com/containers/image/oci/internal" + "github.com/containers/image/transports" + "github.com/containers/image/types" + "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +func init() { + transports.Register(Transport) +} + +var ( + // Transport is an ImageTransport for OCI directories. + Transport = ociTransport{} + + // ErrMoreThanOneImage is an error returned when the manifest includes + // more than one image and the user should choose which one to use. + ErrMoreThanOneImage = errors.New("more than one image in oci, choose an image") +) + +type ociTransport struct{} + +func (t ociTransport) Name() string { + return "oci" +} + +// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. +func (t ociTransport) ParseReference(reference string) (types.ImageReference, error) { + return ParseReference(reference) +} + +// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys +// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). +// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion. +// scope passed to this function will not be "", that value is always allowed. +func (t ociTransport) ValidatePolicyConfigurationScope(scope string) error { + return internal.ValidateScope(scope) +} + +// ociReference is an ImageReference for OCI directory paths. +type ociReference struct { + // Note that the interpretation of paths below depends on the underlying filesystem state, which may change under us at any time! + // Either of the paths may point to a different, or no, inode over time. resolvedDir may contain symbolic links, and so on. + + // Generally we follow the intent of the user, and use the "dir" member for filesystem operations (e.g. the user can use a relative path to avoid + // being exposed to symlinks and renames in the parent directories to the working directory). + // (But in general, we make no attempt to be completely safe against concurrent hostile filesystem modifications.) + dir string // As specified by the user. May be relative, contain symlinks, etc. + resolvedDir string // Absolute path with no symlinks, at least at the time of its creation. Primarily used for policy namespaces. 
+	// If image=="", it means the "only image" in the index.json is used in the case it is a source;
+	// for destinations, the image name annotation "image.ref.name" is not added to the index.json.
+	image string
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an OCI ImageReference.
+func ParseReference(reference string) (types.ImageReference, error) {
+	dir, image := internal.SplitPathAndImage(reference)
+	return NewReference(dir, image)
+}
+
+// NewReference returns an OCI reference for a directory and an image.
+//
+// We do not expose an API supplying the resolvedDir; we could, but recomputing it
+// is generally cheap enough that we prefer being confident about the properties of resolvedDir.
+func NewReference(dir, image string) (types.ImageReference, error) {
+	resolved, err := explicitfilepath.ResolvePathToFullyExplicit(dir)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := internal.ValidateOCIPath(dir); err != nil {
+		return nil, err
+	}
+
+	if err = internal.ValidateImageName(image); err != nil {
+		return nil, err
+	}
+
+	return ociReference{dir: dir, resolvedDir: resolved, image: image}, nil
+}
+
+func (ref ociReference) Transport() types.ImageTransport {
+	return Transport
+}
+
+// StringWithinTransport returns a string representation of the reference, which MUST be such that
+// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
+// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
+// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
+// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
+func (ref ociReference) StringWithinTransport() string {
+	return fmt.Sprintf("%s:%s", ref.dir, ref.image)
+}
+
+// DockerReference returns a Docker reference associated with this reference
+// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent,
+// not e.g. after redirect or alias processing), or nil if unknown/not applicable.
+func (ref ociReference) DockerReference() reference.Named {
+	return nil
+}
+
+// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup.
+// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases;
+// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical
+// (i.e. various references with exactly the same semantics should return the same configuration identity)
+// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but
+// not required/guaranteed that it will be a valid input to Transport().ParseReference().
+// Returns "" if configuration identities for these references are not supported.
+func (ref ociReference) PolicyConfigurationIdentity() string {
+	// NOTE: ref.image is not a part of the image identity, because "$dir:$someimage" and "$dir:" may mean the
+	// same image and the two can’t be statically disambiguated. Using at least the repository directory is
+	// less granular but hopefully still useful.
+	return ref.resolvedDir
+}
+
+// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search
+// for if explicit configuration for PolicyConfigurationIdentity() is not set.
The list will be processed +// in order, terminating on first match, and an implicit "" is always checked at the end. +// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), +// and each following element to be a prefix of the element preceding it. +func (ref ociReference) PolicyConfigurationNamespaces() []string { + res := []string{} + path := ref.resolvedDir + for { + lastSlash := strings.LastIndex(path, "/") + // Note that we do not include "/"; it is redundant with the default "" global default, + // and rejected by ociTransport.ValidatePolicyConfigurationScope above. + if lastSlash == -1 || path == "/" { + break + } + res = append(res, path) + path = path[:lastSlash] + } + return res +} + +// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. +// The caller must call .Close() on the returned ImageCloser. +// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, +// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. +// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. +func (ref ociReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { + src, err := newImageSource(sys, ref) + if err != nil { + return nil, err + } + return image.FromSource(ctx, sys, src) +} + +// getIndex returns a pointer to the index references by this ociReference. If an error occurs opening an index nil is returned together +// with an error. +func (ref ociReference) getIndex() (*imgspecv1.Index, error) { + indexJSON, err := os.Open(ref.indexPath()) + if err != nil { + return nil, err + } + defer indexJSON.Close() + + index := &imgspecv1.Index{} + if err := json.NewDecoder(indexJSON).Decode(index); err != nil { + return nil, err + } + return index, nil +} + +func (ref ociReference) getManifestDescriptor() (imgspecv1.Descriptor, error) { + index, err := ref.getIndex() + if err != nil { + return imgspecv1.Descriptor{}, err + } + + var d *imgspecv1.Descriptor + if ref.image == "" { + // return manifest if only one image is in the oci directory + if len(index.Manifests) == 1 { + d = &index.Manifests[0] + } else { + // ask user to choose image when more than one image in the oci directory + return imgspecv1.Descriptor{}, ErrMoreThanOneImage + } + } else { + // if image specified, look through all manifests for a match + for _, md := range index.Manifests { + if md.MediaType != imgspecv1.MediaTypeImageManifest { + continue + } + refName, ok := md.Annotations["org.opencontainers.image.ref.name"] + if !ok { + continue + } + if refName == ref.image { + d = &md + break + } + } + } + if d == nil { + return imgspecv1.Descriptor{}, fmt.Errorf("no descriptor found for reference %q", ref.image) + } + return *d, nil +} + +// LoadManifestDescriptor loads the manifest descriptor to be used to retrieve the image name +// when pulling an image +func LoadManifestDescriptor(imgRef types.ImageReference) (imgspecv1.Descriptor, error) { + ociRef, ok := imgRef.(ociReference) + if !ok { + return imgspecv1.Descriptor{}, errors.Errorf("error typecasting, need type ociRef") + } + return ociRef.getManifestDescriptor() +} + +// NewImageSource returns a types.ImageSource for this reference. +// The caller must call .Close() on the returned ImageSource. 
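Before the NewImageSource implementation below, a standalone sketch of the annotation-based lookup getManifestDescriptor (above) performs over index.json: scan the index for a manifest descriptor whose org.opencontainers.image.ref.name annotation matches the requested image. findManifest and the sample index are illustrative only:

package main

import (
	"encoding/json"
	"fmt"

	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// findManifest returns the descriptor in an index whose ref.name
// annotation matches image, skipping non-manifest media types.
func findManifest(indexJSON []byte, image string) (*imgspecv1.Descriptor, error) {
	var index imgspecv1.Index
	if err := json.Unmarshal(indexJSON, &index); err != nil {
		return nil, err
	}
	for i := range index.Manifests {
		md := &index.Manifests[i]
		if md.MediaType != imgspecv1.MediaTypeImageManifest {
			continue
		}
		if md.Annotations["org.opencontainers.image.ref.name"] == image {
			return md, nil
		}
	}
	return nil, fmt.Errorf("no descriptor found for reference %q", image)
}

func main() {
	raw := []byte(`{"schemaVersion":2,"manifests":[{"mediaType":"application/vnd.oci.image.manifest.v1+json","digest":"sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855","size":2,"annotations":{"org.opencontainers.image.ref.name":"latest"}}]}`)
	d, err := findManifest(raw, "latest")
	if err != nil {
		panic(err)
	}
	fmt.Println(d.Digest)
}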
+func (ref ociReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { + return newImageSource(sys, ref) +} + +// NewImageDestination returns a types.ImageDestination for this reference. +// The caller must call .Close() on the returned ImageDestination. +func (ref ociReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { + return newImageDestination(sys, ref) +} + +// DeleteImage deletes the named image from the registry, if supported. +func (ref ociReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { + return errors.Errorf("Deleting images not implemented for oci: images") +} + +// ociLayoutPath returns a path for the oci-layout within a directory using OCI conventions. +func (ref ociReference) ociLayoutPath() string { + return filepath.Join(ref.dir, "oci-layout") +} + +// indexPath returns a path for the index.json within a directory using OCI conventions. +func (ref ociReference) indexPath() string { + return filepath.Join(ref.dir, "index.json") +} + +// blobPath returns a path for a blob within a directory using OCI image-layout conventions. +func (ref ociReference) blobPath(digest digest.Digest, sharedBlobDir string) (string, error) { + if err := digest.Validate(); err != nil { + return "", errors.Wrapf(err, "unexpected digest reference %s", digest) + } + blobDir := filepath.Join(ref.dir, "blobs") + if sharedBlobDir != "" { + blobDir = sharedBlobDir + } + return filepath.Join(blobDir, digest.Algorithm().String(), digest.Hex()), nil +} diff --git a/vendor/github.com/containers/image/openshift/openshift-copies.go b/vendor/github.com/containers/image/openshift/openshift-copies.go new file mode 100644 index 0000000..01fe71a --- /dev/null +++ b/vendor/github.com/containers/image/openshift/openshift-copies.go @@ -0,0 +1,1174 @@ +package openshift + +import ( + "crypto/tls" + "crypto/x509" + "encoding/json" + "fmt" + "io/ioutil" + "net" + "net/http" + "net/url" + "os" + "path" + "path/filepath" + "reflect" + "strings" + "time" + + "github.com/ghodss/yaml" + "github.com/imdario/mergo" + "github.com/pkg/errors" + "golang.org/x/net/http2" + "k8s.io/client-go/util/homedir" +) + +// restTLSClientConfig is a modified copy of k8s.io/kubernets/pkg/client/restclient.TLSClientConfig. +// restTLSClientConfig contains settings to enable transport layer security +type restTLSClientConfig struct { + // Server requires TLS client certificate authentication + CertFile string + // Server requires TLS client certificate authentication + KeyFile string + // Trusted root certificates for server + CAFile string + + // CertData holds PEM-encoded bytes (typically read from a client certificate file). + // CertData takes precedence over CertFile + CertData []byte + // KeyData holds PEM-encoded bytes (typically read from a client certificate key file). + // KeyData takes precedence over KeyFile + KeyData []byte + // CAData holds PEM-encoded bytes (typically read from a root certificates bundle). + // CAData takes precedence over CAFile + CAData []byte +} + +// restConfig is a modified copy of k8s.io/kubernets/pkg/client/restclient.Config. +// Config holds the common attributes that can be passed to a Kubernetes client on +// initialization. +type restConfig struct { + // Host must be a host string, a host:port pair, or a URL to the base of the apiserver. 
+ // If a URL is given then the (optional) Path of that URL represents a prefix that must + // be appended to all request URIs used to access the apiserver. This allows a frontend + // proxy to easily relocate all of the apiserver endpoints. + Host string + + // Server requires Basic authentication + Username string + Password string + + // Server requires Bearer authentication. This client will not attempt to use + // refresh tokens for an OAuth2 flow. + // TODO: demonstrate an OAuth2 compatible client. + BearerToken string + + // TLSClientConfig contains settings to enable transport layer security + restTLSClientConfig + + // Server should be accessed without verifying the TLS + // certificate. For testing only. + Insecure bool +} + +// ClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfig. +// ClientConfig is used to make it easy to get an api server client +type clientConfig interface { + // ClientConfig returns a complete client config + ClientConfig() (*restConfig, error) +} + +// defaultClientConfig is a modified copy of openshift/origin/pkg/cmd/util/clientcmd.DefaultClientConfig. +func defaultClientConfig() clientConfig { + loadingRules := newOpenShiftClientConfigLoadingRules() + // REMOVED: Allowing command-line overriding of loadingRules + // REMOVED: clientcmd.ConfigOverrides + + clientConfig := newNonInteractiveDeferredLoadingClientConfig(loadingRules) + + return clientConfig +} + +var recommendedHomeFile = path.Join(homedir.HomeDir(), ".kube/config") + +// newOpenShiftClientConfigLoadingRules is a modified copy of openshift/origin/pkg/cmd/cli/config.NewOpenShiftClientConfigLoadingRules. +// NewOpenShiftClientConfigLoadingRules returns file priority loading rules for OpenShift. +// 1. --config value +// 2. if KUBECONFIG env var has a value, use it. Otherwise, ~/.kube/config file +func newOpenShiftClientConfigLoadingRules() *clientConfigLoadingRules { + chain := []string{} + + envVarFile := os.Getenv("KUBECONFIG") + if len(envVarFile) != 0 { + chain = append(chain, filepath.SplitList(envVarFile)...) + } else { + chain = append(chain, recommendedHomeFile) + } + + return &clientConfigLoadingRules{ + Precedence: chain, + // REMOVED: Migration support; run (oc login) to trigger migration + } +} + +// deferredLoadingClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DeferredLoadingClientConfig. +// DeferredLoadingClientConfig is a ClientConfig interface that is backed by a set of loading rules +// It is used in cases where the loading rules may change after you've instantiated them and you want to be sure that +// the most recent rules are used. This is useful in cases where you bind flags to loading rule parameters before +// the parse happens and you want your calling code to be ignorant of how the values are being mutated to avoid +// passing extraneous information down a call stack +type deferredLoadingClientConfig struct { + loadingRules *clientConfigLoadingRules + + clientConfig clientConfig +} + +// NewNonInteractiveDeferredLoadingClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.NewNonInteractiveDeferredLoadingClientConfig. 
+// NewNonInteractiveDeferredLoadingClientConfig creates a ClientConfig using the passed context name
+func newNonInteractiveDeferredLoadingClientConfig(loadingRules *clientConfigLoadingRules) clientConfig {
+	return &deferredLoadingClientConfig{loadingRules: loadingRules}
+}
+
+func (config *deferredLoadingClientConfig) createClientConfig() (clientConfig, error) {
+	if config.clientConfig == nil {
+		// REMOVED: Support for concurrent use in multiple threads.
+		mergedConfig, err := config.loadingRules.Load()
+		if err != nil {
+			return nil, err
+		}
+
+		var mergedClientConfig clientConfig
+		// REMOVED: Interactive fallback support.
+		mergedClientConfig = newNonInteractiveClientConfig(*mergedConfig)
+
+		config.clientConfig = mergedClientConfig
+	}
+
+	return config.clientConfig, nil
+}
+
+// ClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DeferredLoadingClientConfig.ClientConfig.
+// ClientConfig implements ClientConfig
+func (config *deferredLoadingClientConfig) ClientConfig() (*restConfig, error) {
+	mergedClientConfig, err := config.createClientConfig()
+	if err != nil {
+		return nil, err
+	}
+	mergedConfig, err := mergedClientConfig.ClientConfig()
+	if err != nil {
+		return nil, err
+	}
+	// REMOVED: In-cluster service account configuration use.
+
+	return mergedConfig, nil
+}
+
+var (
+	// DefaultCluster is the cluster config used when no other config is specified
+	// TODO: eventually apiserver should start on 443 and be secure by default
+	defaultCluster = clientcmdCluster{Server: "http://localhost:8080"}
+
+	// EnvVarCluster allows overriding the DefaultCluster using an envvar for the server name
+	envVarCluster = clientcmdCluster{Server: os.Getenv("KUBERNETES_MASTER")}
+)
+
+// directClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.
+// DirectClientConfig is a ClientConfig interface that is backed by a clientcmdapi.Config, options overrides, and an optional fallbackReader for auth information
+type directClientConfig struct {
+	config clientcmdConfig
+}
+
+// newNonInteractiveClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.NewNonInteractiveClientConfig.
+// NewNonInteractiveClientConfig creates a DirectClientConfig using the passed context name and does not have a fallback reader for auth information
+func newNonInteractiveClientConfig(config clientcmdConfig) clientConfig {
+	return &directClientConfig{config}
+}
+
+// ClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.ClientConfig.
+// ClientConfig implements ClientConfig
+func (config *directClientConfig) ClientConfig() (*restConfig, error) {
+	if err := config.ConfirmUsable(); err != nil {
+		return nil, err
+	}
+
+	configAuthInfo := config.getAuthInfo()
+	configClusterInfo := config.getCluster()
+
+	clientConfig := &restConfig{}
+	clientConfig.Host = configClusterInfo.Server
+	if u, err := url.ParseRequestURI(clientConfig.Host); err == nil && u.Opaque == "" && len(u.Path) > 1 {
+		u.RawQuery = ""
+		u.Fragment = ""
+		clientConfig.Host = u.String()
+	}
+
+	// only try to read the auth information if we are secure
+	if isConfigTransportTLS(*clientConfig) {
+		var err error
+
+		// mergo is first-write-wins for map values and last-write-wins for interface values
+		// NOTE: This behavior changed with https://github.com/imdario/mergo/commit/d304790b2ed594794496464fadd89d2bb266600a.
+		// Our mergo.Merge version is older than this change.
+		// REMOVED: Support for interactive fallback.
+		userAuthPartialConfig, err := getUserIdentificationPartialConfig(configAuthInfo)
+		if err != nil {
+			return nil, err
+		}
+		mergo.Merge(clientConfig, userAuthPartialConfig)
+
+		serverAuthPartialConfig, err := getServerIdentificationPartialConfig(configAuthInfo, configClusterInfo)
+		if err != nil {
+			return nil, err
+		}
+		mergo.Merge(clientConfig, serverAuthPartialConfig)
+	}
+
+	return clientConfig, nil
+}
+
+// getServerIdentificationPartialConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.getServerIdentificationPartialConfig.
+// clientauth.Info objects contain both user identification and server identification. We want different precedence orders for
+// both, so we have to split the objects and merge them separately.
+// We want this order of precedence for the server identification:
+// 1. configClusterInfo (the final result of command line flags and merged .kubeconfig files)
+// 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win)
+// 3. load the ~/.kubernetes_auth file as a default
+func getServerIdentificationPartialConfig(configAuthInfo clientcmdAuthInfo, configClusterInfo clientcmdCluster) (*restConfig, error) {
+	mergedConfig := &restConfig{}
+
+	// configClusterInfo holds the information that identifies the server, provided by .kubeconfig
+	configClientConfig := &restConfig{}
+	configClientConfig.CAFile = configClusterInfo.CertificateAuthority
+	configClientConfig.CAData = configClusterInfo.CertificateAuthorityData
+	configClientConfig.Insecure = configClusterInfo.InsecureSkipTLSVerify
+	mergo.Merge(mergedConfig, configClientConfig)
+
+	return mergedConfig, nil
+}
+
+// getUserIdentificationPartialConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.getUserIdentificationPartialConfig.
+// clientauth.Info objects contain both user identification and server identification. We want different precedence orders for
+// both, so we have to split the objects and merge them separately.
+// We want this order of precedence for user identification:
+// 1. configAuthInfo minus auth-path (the final result of command line flags and merged .kubeconfig files)
+// 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win)
+// 3. if there is not enough information to identify the user, try to load the ~/.kubernetes_auth file
+// 4. if there is not enough information to identify the user, prompt if possible
+func getUserIdentificationPartialConfig(configAuthInfo clientcmdAuthInfo) (*restConfig, error) {
+	mergedConfig := &restConfig{}
+
+	// blindly overwrite existing values based on precedence
+	if len(configAuthInfo.Token) > 0 {
+		mergedConfig.BearerToken = configAuthInfo.Token
+	}
+	if len(configAuthInfo.ClientCertificate) > 0 || len(configAuthInfo.ClientCertificateData) > 0 {
+		mergedConfig.CertFile = configAuthInfo.ClientCertificate
+		mergedConfig.CertData = configAuthInfo.ClientCertificateData
+		mergedConfig.KeyFile = configAuthInfo.ClientKey
+		mergedConfig.KeyData = configAuthInfo.ClientKeyData
+	}
+	if len(configAuthInfo.Username) > 0 || len(configAuthInfo.Password) > 0 {
+		mergedConfig.Username = configAuthInfo.Username
+		mergedConfig.Password = configAuthInfo.Password
+	}
+
+	// REMOVED: prompting for missing information.
+ return mergedConfig, nil +} + +// canIdentifyUser is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.canIdentifyUser +func canIdentifyUser(config restConfig) bool { + return len(config.Username) > 0 || + (len(config.CertFile) > 0 || len(config.CertData) > 0) || + len(config.BearerToken) > 0 + +} + +// ConfirmUsable is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.ConfirmUsable. +// ConfirmUsable looks a particular context and determines if that particular part of the config is useable. There might still be errors in the config, +// but no errors in the sections requested or referenced. It does not return early so that it can find as many errors as possible. +func (config *directClientConfig) ConfirmUsable() error { + var validationErrors []error + validationErrors = append(validationErrors, validateAuthInfo(config.getAuthInfoName(), config.getAuthInfo())...) + validationErrors = append(validationErrors, validateClusterInfo(config.getClusterName(), config.getCluster())...) + // when direct client config is specified, and our only error is that no server is defined, we should + // return a standard "no config" error + if len(validationErrors) == 1 && validationErrors[0] == errEmptyCluster { + return newErrConfigurationInvalid([]error{errEmptyConfig}) + } + return newErrConfigurationInvalid(validationErrors) +} + +// getContextName is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getContextName. +func (config *directClientConfig) getContextName() string { + // REMOVED: overrides support + return config.config.CurrentContext +} + +// getAuthInfoName is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getAuthInfoName. +func (config *directClientConfig) getAuthInfoName() string { + // REMOVED: overrides support + return config.getContext().AuthInfo +} + +// getClusterName is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getClusterName. +func (config *directClientConfig) getClusterName() string { + // REMOVED: overrides support + return config.getContext().Cluster +} + +// getContext is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getContext. +func (config *directClientConfig) getContext() clientcmdContext { + contexts := config.config.Contexts + contextName := config.getContextName() + + var mergedContext clientcmdContext + if configContext, exists := contexts[contextName]; exists { + mergo.Merge(&mergedContext, configContext) + } + // REMOVED: overrides support + + return mergedContext +} + +var ( + errEmptyConfig = errors.New("no configuration has been provided") + // message is for consistency with old behavior + errEmptyCluster = errors.New("cluster has no server defined") +) + +// validateClusterInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.validateClusterInfo. 
+// validateClusterInfo looks for conflicts and errors in the cluster info +func validateClusterInfo(clusterName string, clusterInfo clientcmdCluster) []error { + var validationErrors []error + + if reflect.DeepEqual(clientcmdCluster{}, clusterInfo) { + return []error{errEmptyCluster} + } + + if len(clusterInfo.Server) == 0 { + if len(clusterName) == 0 { + validationErrors = append(validationErrors, errors.Errorf("default cluster has no server defined")) + } else { + validationErrors = append(validationErrors, errors.Errorf("no server found for cluster %q", clusterName)) + } + } + // Make sure CA data and CA file aren't both specified + if len(clusterInfo.CertificateAuthority) != 0 && len(clusterInfo.CertificateAuthorityData) != 0 { + validationErrors = append(validationErrors, errors.Errorf("certificate-authority-data and certificate-authority are both specified for %v. certificate-authority-data will override", clusterName)) + } + if len(clusterInfo.CertificateAuthority) != 0 { + clientCertCA, err := os.Open(clusterInfo.CertificateAuthority) + defer clientCertCA.Close() + if err != nil { + validationErrors = append(validationErrors, errors.Errorf("unable to read certificate-authority %v for %v due to %v", clusterInfo.CertificateAuthority, clusterName, err)) + } + } + + return validationErrors +} + +// validateAuthInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.validateAuthInfo. +// validateAuthInfo looks for conflicts and errors in the auth info +func validateAuthInfo(authInfoName string, authInfo clientcmdAuthInfo) []error { + var validationErrors []error + + usingAuthPath := false + methods := make([]string, 0, 3) + if len(authInfo.Token) != 0 { + methods = append(methods, "token") + } + if len(authInfo.Username) != 0 || len(authInfo.Password) != 0 { + methods = append(methods, "basicAuth") + } + + if len(authInfo.ClientCertificate) != 0 || len(authInfo.ClientCertificateData) != 0 { + // Make sure cert data and file aren't both specified + if len(authInfo.ClientCertificate) != 0 && len(authInfo.ClientCertificateData) != 0 { + validationErrors = append(validationErrors, errors.Errorf("client-cert-data and client-cert are both specified for %v. 
client-cert-data will override", authInfoName)) + } + // Make sure key data and file aren't both specified + if len(authInfo.ClientKey) != 0 && len(authInfo.ClientKeyData) != 0 { + validationErrors = append(validationErrors, errors.Errorf("client-key-data and client-key are both specified for %v; client-key-data will override", authInfoName)) + } + // Make sure a key is specified + if len(authInfo.ClientKey) == 0 && len(authInfo.ClientKeyData) == 0 { + validationErrors = append(validationErrors, errors.Errorf("client-key-data or client-key must be specified for %v to use the clientCert authentication method", authInfoName)) + } + + if len(authInfo.ClientCertificate) != 0 { + clientCertFile, err := os.Open(authInfo.ClientCertificate) + defer clientCertFile.Close() + if err != nil { + validationErrors = append(validationErrors, errors.Errorf("unable to read client-cert %v for %v due to %v", authInfo.ClientCertificate, authInfoName, err)) + } + } + if len(authInfo.ClientKey) != 0 { + clientKeyFile, err := os.Open(authInfo.ClientKey) + defer clientKeyFile.Close() + if err != nil { + validationErrors = append(validationErrors, errors.Errorf("unable to read client-key %v for %v due to %v", authInfo.ClientKey, authInfoName, err)) + } + } + } + + // authPath also provides information for the client to identify the server, so allow multiple auth methods in that case + if (len(methods) > 1) && (!usingAuthPath) { + validationErrors = append(validationErrors, errors.Errorf("more than one authentication method found for %v; found %v, only one is allowed", authInfoName, methods)) + } + + return validationErrors +} + +// getAuthInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getAuthInfo. +func (config *directClientConfig) getAuthInfo() clientcmdAuthInfo { + authInfos := config.config.AuthInfos + authInfoName := config.getAuthInfoName() + + var mergedAuthInfo clientcmdAuthInfo + if configAuthInfo, exists := authInfos[authInfoName]; exists { + mergo.Merge(&mergedAuthInfo, configAuthInfo) + } + // REMOVED: overrides support + + return mergedAuthInfo +} + +// getCluster is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getCluster. +func (config *directClientConfig) getCluster() clientcmdCluster { + clusterInfos := config.config.Clusters + clusterInfoName := config.getClusterName() + + var mergedClusterInfo clientcmdCluster + mergo.Merge(&mergedClusterInfo, defaultCluster) + mergo.Merge(&mergedClusterInfo, envVarCluster) + if configClusterInfo, exists := clusterInfos[clusterInfoName]; exists { + mergo.Merge(&mergedClusterInfo, configClusterInfo) + } + // REMOVED: overrides support + + return mergedClusterInfo +} + +// aggregateErr is a modified copy of k8s.io/apimachinery/pkg/util/errors.aggregate. +// This helper implements the error and Errors interfaces. Keeping it private +// prevents people from making an aggregate of 0 errors, which is not +// an error, but does satisfy the error interface. +type aggregateErr []error + +// newAggregate is a modified copy of k8s.io/apimachinery/pkg/util/errors.NewAggregate. +// NewAggregate converts a slice of errors into an Aggregate interface, which +// is itself an implementation of the error interface. If the slice is empty, +// this returns nil. +// It will check if any of the element of input error list is nil, to avoid +// nil pointer panic when call Error(). 
+func newAggregate(errlist []error) error {
+	if len(errlist) == 0 {
+		return nil
+	}
+	// In case the input error list contains nil
+	var errs []error
+	for _, e := range errlist {
+		if e != nil {
+			errs = append(errs, e)
+		}
+	}
+	if len(errs) == 0 {
+		return nil
+	}
+	return aggregateErr(errs)
+}
+
+// Error is a modified copy of k8s.io/apimachinery/pkg/util/errors.aggregate.Error.
+// Error is part of the error interface.
+func (agg aggregateErr) Error() string {
+	if len(agg) == 0 {
+		// This should never happen, really.
+		return ""
+	}
+	if len(agg) == 1 {
+		return agg[0].Error()
+	}
+	result := fmt.Sprintf("[%s", agg[0].Error())
+	for i := 1; i < len(agg); i++ {
+		result += fmt.Sprintf(", %s", agg[i].Error())
+	}
+	result += "]"
+	return result
+}
+
+// REMOVED: aggregateErr.Errors
+
+// errConfigurationInvalid is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.errConfigurationInvalid.
+// errConfigurationInvalid is a set of errors indicating the configuration is invalid.
+type errConfigurationInvalid []error
+
+var _ error = errConfigurationInvalid{}
+
+// REMOVED: utilerrors.Aggregate implementation for errConfigurationInvalid.
+
+// newErrConfigurationInvalid is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.newErrConfigurationInvalid.
+func newErrConfigurationInvalid(errs []error) error {
+	switch len(errs) {
+	case 0:
+		return nil
+	default:
+		return errConfigurationInvalid(errs)
+	}
+}
+
+// Error implements the error interface
+func (e errConfigurationInvalid) Error() string {
+	return fmt.Sprintf("invalid configuration: %v", newAggregate(e).Error())
+}
+
+// clientConfigLoadingRules is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules
+// ClientConfigLoadingRules is an ExplicitPath and string slice of specific locations that are used for merging together a Config
+// Callers can put the chain together however they want, but we'd recommend:
+// EnvVarPathFiles if set (a list of files if set) OR the HomeDirectoryPath
+// ExplicitPath is special, because if a user specifically requests a certain file to be used, an error is reported if this file is not present
+type clientConfigLoadingRules struct {
+	Precedence []string
+}
+
+// Load is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.Load
+// Load starts by running the MigrationRules and then
+// takes the loading rules and returns a Config object based on the following rules.
+// If the ExplicitPath is set, return the unmerged explicit file.
+// Otherwise, return a merged config based on the Precedence slice.
+// A missing ExplicitPath file produces an error. Empty filenames or other missing files are ignored.
+// Read errors or files with non-deserializable content produce errors.
+// The first file to set a particular map key wins and that map key's value is never changed.
+// BUT, if you set a struct value that is NOT contained inside of a map, the value WILL be changed.
+// This results in some odd looking logic to merge in one direction, merge in the other, and then merge the two.
+// It also means that if two files specify a "red-user", only values from the first file's red-user are used. Even
+// non-conflicting entries from the second file's "red-user" are discarded.
+// Relative paths inside of the .kubeconfig files are resolved against the .kubeconfig file's parent folder
+// and only absolute file paths are returned.
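Before the Load implementation below, a standalone sketch of the precedence rule that newOpenShiftClientConfigLoadingRules (above) builds and Load consumes: every path listed in $KUBECONFIG wins over the single ~/.kube/config fallback. kubeconfigChain is a hypothetical helper, not part of the vendored code:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// kubeconfigChain returns the file chain in merge order: the
// list-separated $KUBECONFIG entries if set, else ~/.kube/config.
func kubeconfigChain(home string) []string {
	if env := os.Getenv("KUBECONFIG"); env != "" {
		return filepath.SplitList(env)
	}
	return []string{filepath.Join(home, ".kube", "config")}
}

func main() {
	// The vendored code resolves home via k8s.io/client-go/util/homedir.
	fmt.Println(kubeconfigChain(os.Getenv("HOME")))
}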
+func (rules *clientConfigLoadingRules) Load() (*clientcmdConfig, error) { + errlist := []error{} + + kubeConfigFiles := []string{} + + // REMOVED: explicit path support + kubeConfigFiles = append(kubeConfigFiles, rules.Precedence...) + + kubeconfigs := []*clientcmdConfig{} + // read and cache the config files so that we only look at them once + for _, filename := range kubeConfigFiles { + if len(filename) == 0 { + // no work to do + continue + } + + config, err := loadFromFile(filename) + if os.IsNotExist(err) { + // skip missing files + continue + } + if err != nil { + errlist = append(errlist, errors.Wrapf(err, "Error loading config file \"%s\"", filename)) + continue + } + + kubeconfigs = append(kubeconfigs, config) + } + + // first merge all of our maps + mapConfig := clientcmdNewConfig() + for _, kubeconfig := range kubeconfigs { + mergo.Merge(mapConfig, kubeconfig) + } + + // merge all of the struct values in the reverse order so that priority is given correctly + // errors are not added to the list the second time + nonMapConfig := clientcmdNewConfig() + for i := len(kubeconfigs) - 1; i >= 0; i-- { + kubeconfig := kubeconfigs[i] + mergo.Merge(nonMapConfig, kubeconfig) + } + + // since values are overwritten, but maps values are not, we can merge the non-map config on top of the map config and + // get the values we expect. + config := clientcmdNewConfig() + mergo.Merge(config, mapConfig) + mergo.Merge(config, nonMapConfig) + + // REMOVED: Possibility to skip this. + if err := resolveLocalPaths(config); err != nil { + errlist = append(errlist, err) + } + + return config, newAggregate(errlist) +} + +// loadFromFile is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.LoadFromFile +// LoadFromFile takes a filename and deserializes the contents into Config object +func loadFromFile(filename string) (*clientcmdConfig, error) { + kubeconfigBytes, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + config, err := load(kubeconfigBytes) + if err != nil { + return nil, err + } + + // set LocationOfOrigin on every Cluster, User, and Context + for key, obj := range config.AuthInfos { + obj.LocationOfOrigin = filename + config.AuthInfos[key] = obj + } + for key, obj := range config.Clusters { + obj.LocationOfOrigin = filename + config.Clusters[key] = obj + } + for key, obj := range config.Contexts { + obj.LocationOfOrigin = filename + config.Contexts[key] = obj + } + + if config.AuthInfos == nil { + config.AuthInfos = map[string]*clientcmdAuthInfo{} + } + if config.Clusters == nil { + config.Clusters = map[string]*clientcmdCluster{} + } + if config.Contexts == nil { + config.Contexts = map[string]*clientcmdContext{} + } + + return config, nil +} + +// load is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.Load +// Load takes a byte slice and deserializes the contents into Config object. +// Encapsulates deserialization without assuming the source is a file. +func load(data []byte) (*clientcmdConfig, error) { + config := clientcmdNewConfig() + // if there's no data in a file, return the default object instead of failing (DecodeInto reject empty input) + if len(data) == 0 { + return config, nil + } + // Note: This does absolutely no kind/version checking or conversions. 
+ data, err := yaml.YAMLToJSON(data) + if err != nil { + return nil, err + } + if err := json.Unmarshal(data, config); err != nil { + return nil, err + } + return config, nil +} + +// resolveLocalPaths is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.resolveLocalPaths. +// ResolveLocalPaths resolves all relative paths in the config object with respect to the stanza's LocationOfOrigin +// this cannot be done directly inside of LoadFromFile because doing so there would make it impossible to load a file without +// modification of its contents. +func resolveLocalPaths(config *clientcmdConfig) error { + for _, cluster := range config.Clusters { + if len(cluster.LocationOfOrigin) == 0 { + continue + } + base, err := filepath.Abs(filepath.Dir(cluster.LocationOfOrigin)) + if err != nil { + return errors.Wrapf(err, "Could not determine the absolute path of config file %s", cluster.LocationOfOrigin) + } + + if err := resolvePaths(getClusterFileReferences(cluster), base); err != nil { + return err + } + } + for _, authInfo := range config.AuthInfos { + if len(authInfo.LocationOfOrigin) == 0 { + continue + } + base, err := filepath.Abs(filepath.Dir(authInfo.LocationOfOrigin)) + if err != nil { + return errors.Wrapf(err, "Could not determine the absolute path of config file %s", authInfo.LocationOfOrigin) + } + + if err := resolvePaths(getAuthInfoFileReferences(authInfo), base); err != nil { + return err + } + } + + return nil +} + +// getClusterFileReferences is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.GetClusterFileReferences. +func getClusterFileReferences(cluster *clientcmdCluster) []*string { + return []*string{&cluster.CertificateAuthority} +} + +// getAuthInfoFileReferences is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.GetAuthInfoFileReferences. +func getAuthInfoFileReferences(authInfo *clientcmdAuthInfo) []*string { + return []*string{&authInfo.ClientCertificate, &authInfo.ClientKey} +} + +// resolvePaths is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.resolvePaths. +// ResolvePaths updates the given refs to be absolute paths, relative to the given base directory +func resolvePaths(refs []*string, base string) error { + for _, ref := range refs { + // Don't resolve empty paths + if len(*ref) > 0 { + // Don't resolve absolute paths + if !filepath.IsAbs(*ref) { + *ref = filepath.Join(base, *ref) + } + } + } + return nil +} + +// restClientFor is a modified copy of k8s.io/kubernets/pkg/client/restclient.RESTClientFor. +// RESTClientFor returns a RESTClient that satisfies the requested attributes on a client Config +// object. Note that a RESTClient may require fields that are optional when initializing a Client. +// A RESTClient created by this method is generic - it expects to operate on an API that follows +// the Kubernetes conventions, but may not be the Kubernetes API. 
+func restClientFor(config *restConfig) (*url.URL, *http.Client, error) { + // REMOVED: Configurable GroupVersion, Codec + // REMOVED: Configurable versionedAPIPath + baseURL, err := defaultServerURLFor(config) + if err != nil { + return nil, nil, err + } + + transport, err := transportFor(config) + if err != nil { + return nil, nil, err + } + + var httpClient *http.Client + if transport != http.DefaultTransport { + httpClient = &http.Client{Transport: transport} + } + + // REMOVED: Configurable QPS, Burst, ContentConfig + // REMOVED: Actually returning a RESTClient object. + return baseURL, httpClient, nil +} + +// defaultServerURL is a modified copy of k8s.io/kubernets/pkg/client/restclient.DefaultServerURL. +// DefaultServerURL converts a host, host:port, or URL string to the default base server API path +// to use with a Client at a given API version following the standard conventions for a +// Kubernetes API. +func defaultServerURL(host string, defaultTLS bool) (*url.URL, error) { + if host == "" { + return nil, errors.Errorf("host must be a URL or a host:port pair") + } + base := host + hostURL, err := url.Parse(base) + if err != nil { + return nil, err + } + if hostURL.Scheme == "" { + scheme := "http://" + if defaultTLS { + scheme = "https://" + } + hostURL, err = url.Parse(scheme + base) + if err != nil { + return nil, err + } + if hostURL.Path != "" && hostURL.Path != "/" { + return nil, errors.Errorf("host must be a URL or a host:port pair: %q", base) + } + } + + // REMOVED: versionedAPIPath computation. + return hostURL, nil +} + +// defaultServerURLFor is a modified copy of k8s.io/kubernets/pkg/client/restclient.defaultServerURLFor. +// defaultServerUrlFor is shared between IsConfigTransportTLS and RESTClientFor. It +// requires Host and Version to be set prior to being called. +func defaultServerURLFor(config *restConfig) (*url.URL, error) { + // TODO: move the default to secure when the apiserver supports TLS by default + // config.Insecure is taken to mean "I want HTTPS but don't bother checking the certs against a CA." + hasCA := len(config.CAFile) != 0 || len(config.CAData) != 0 + hasCert := len(config.CertFile) != 0 || len(config.CertData) != 0 + defaultTLS := hasCA || hasCert || config.Insecure + host := config.Host + if host == "" { + host = "localhost" + } + + // REMOVED: Configurable APIPath, GroupVersion + return defaultServerURL(host, defaultTLS) +} + +// transportFor is a modified copy of k8s.io/kubernets/pkg/client/restclient.transportFor. +// TransportFor returns an http.RoundTripper that will provide the authentication +// or transport level security defined by the provided Config. Will return the +// default http.DefaultTransport if no special case behavior is needed. +func transportFor(config *restConfig) (http.RoundTripper, error) { + // REMOVED: separation between restclient.Config and transport.Config, Transport, WrapTransport support + return transportNew(config) +} + +// isConfigTransportTLS is a modified copy of k8s.io/kubernets/pkg/client/restclient.IsConfigTransportTLS. +// IsConfigTransportTLS returns true if and only if the provided +// config will result in a protected connection to the server when it +// is passed to restclient.RESTClientFor(). Use to determine when to +// send credentials over the wire. +// +// Note: the Insecure flag is ignored when testing for this value, so MITM attacks are +// still possible. 
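Before the isConfigTransportTLS implementation below, a standalone sketch of the scheme defaulting that defaultServerURL (above) performs: a bare host gets http:// or https:// depending on whether TLS material is configured. serverURL is illustrative only, and it adds a Host check beyond the vendored version because url.Parse reads a bare "host:port" as scheme:opaque:

package main

import (
	"fmt"
	"net/url"
)

// serverURL prepends a scheme when the input has none, choosing https
// when TLS is configured, mirroring defaultServerURL's intent.
func serverURL(host string, defaultTLS bool) (*url.URL, error) {
	u, err := url.Parse(host)
	if err != nil || u.Scheme == "" || u.Host == "" {
		scheme := "http://"
		if defaultTLS {
			scheme = "https://"
		}
		return url.Parse(scheme + host)
	}
	return u, nil
}

func main() {
	u, err := serverURL("localhost:8443", true)
	if err != nil {
		panic(err)
	}
	fmt.Println(u) // https://localhost:8443
}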
+func isConfigTransportTLS(config restConfig) bool { + baseURL, err := defaultServerURLFor(&config) + if err != nil { + return false + } + return baseURL.Scheme == "https" +} + +// transportNew is a modified copy of k8s.io/kubernetes/pkg/client/transport.New. +// New returns an http.RoundTripper that will provide the authentication +// or transport level security defined by the provided Config. +func transportNew(config *restConfig) (http.RoundTripper, error) { + // REMOVED: custom config.Transport support. + // Set transport level security + + var ( + rt http.RoundTripper + err error + ) + + rt, err = tlsCacheGet(config) + if err != nil { + return nil, err + } + + // REMOVED: HTTPWrappersForConfig(config, rt) in favor of the caller setting HTTP headers itself based on restConfig. Only this inlined check remains. + if len(config.Username) != 0 && len(config.BearerToken) != 0 { + return nil, errors.Errorf("username/password or bearer token may be set, but not both") + } + + return rt, nil +} + +// newProxierWithNoProxyCIDR is a modified copy of k8s.io/apimachinery/pkg/util/net.NewProxierWithNoProxyCIDR. +// NewProxierWithNoProxyCIDR constructs a Proxier function that respects CIDRs in NO_PROXY and delegates if +// no matching CIDRs are found +func newProxierWithNoProxyCIDR(delegate func(req *http.Request) (*url.URL, error)) func(req *http.Request) (*url.URL, error) { + // we wrap the default method, so we only need to perform our check if the NO_PROXY envvar has a CIDR in it + noProxyEnv := os.Getenv("NO_PROXY") + noProxyRules := strings.Split(noProxyEnv, ",") + + cidrs := []*net.IPNet{} + for _, noProxyRule := range noProxyRules { + _, cidr, _ := net.ParseCIDR(noProxyRule) + if cidr != nil { + cidrs = append(cidrs, cidr) + } + } + + if len(cidrs) == 0 { + return delegate + } + + return func(req *http.Request) (*url.URL, error) { + host := req.URL.Host + // for some urls, the Host is already the host, not the host:port + if net.ParseIP(host) == nil { + var err error + host, _, err = net.SplitHostPort(req.URL.Host) + if err != nil { + return delegate(req) + } + } + + ip := net.ParseIP(host) + if ip == nil { + return delegate(req) + } + + for _, cidr := range cidrs { + if cidr.Contains(ip) { + return nil, nil + } + } + + return delegate(req) + } +} + +// tlsCacheGet is a modified copy of k8s.io/kubernetes/pkg/client/transport.tlsTransportCache.get. +func tlsCacheGet(config *restConfig) (http.RoundTripper, error) { + // REMOVED: any actual caching + + // Get the TLS options for this client config + tlsConfig, err := tlsConfigFor(config) + if err != nil { + return nil, err + } + // The options didn't require a custom TLS config + if tlsConfig == nil { + return http.DefaultTransport, nil + } + + // REMOVED: Call to k8s.io/apimachinery/pkg/util/net.SetTransportDefaults; instead of the generic machinery and conditionals, hard-coded the result here. + t := &http.Transport{ + // http.ProxyFromEnvironment doesn't respect CIDRs and that makes it impossible to exclude things like pod and service IPs from proxy settings + // ProxierWithNoProxyCIDR allows CIDR rules in NO_PROXY + Proxy: newProxierWithNoProxyCIDR(http.ProxyFromEnvironment), + TLSHandshakeTimeout: 10 * time.Second, + TLSClientConfig: tlsConfig, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + } + // Allow clients to disable http2 if needed. 
+	if s := os.Getenv("DISABLE_HTTP2"); len(s) == 0 {
+		_ = http2.ConfigureTransport(t)
+	}
+	return t, nil
+}
+
+// tlsConfigFor is a modified copy of k8s.io/kubernetes/pkg/client/transport.TLSConfigFor.
+// TLSConfigFor returns a tls.Config that will provide the transport level security defined
+// by the provided Config. Will return nil if no transport level security is requested.
+func tlsConfigFor(c *restConfig) (*tls.Config, error) {
+	if !(c.HasCA() || c.HasCertAuth() || c.Insecure) {
+		return nil, nil
+	}
+	if c.HasCA() && c.Insecure {
+		return nil, errors.Errorf("specifying a root certificates file with the insecure flag is not allowed")
+	}
+	if err := loadTLSFiles(c); err != nil {
+		return nil, err
+	}
+
+	tlsConfig := &tls.Config{
+		// Change default from SSLv3 to TLSv1.0 (because of POODLE vulnerability)
+		MinVersion:         tls.VersionTLS10,
+		InsecureSkipVerify: c.Insecure,
+	}
+
+	if c.HasCA() {
+		tlsConfig.RootCAs = rootCertPool(c.CAData)
+	}
+
+	if c.HasCertAuth() {
+		cert, err := tls.X509KeyPair(c.CertData, c.KeyData)
+		if err != nil {
+			return nil, err
+		}
+		tlsConfig.Certificates = []tls.Certificate{cert}
+	}
+
+	return tlsConfig, nil
+}
+
+// loadTLSFiles is a modified copy of k8s.io/kubernetes/pkg/client/transport.loadTLSFiles.
+// loadTLSFiles copies the data from the CertFile, KeyFile, and CAFile fields into the CertData,
+// KeyData, and CAData fields, or returns an error. If no error is returned, all three fields are
+// either populated or were empty to start.
+func loadTLSFiles(c *restConfig) error {
+	var err error
+	c.CAData, err = dataFromSliceOrFile(c.CAData, c.CAFile)
+	if err != nil {
+		return err
+	}
+
+	c.CertData, err = dataFromSliceOrFile(c.CertData, c.CertFile)
+	if err != nil {
+		return err
+	}
+
+	c.KeyData, err = dataFromSliceOrFile(c.KeyData, c.KeyFile)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// dataFromSliceOrFile is a modified copy of k8s.io/kubernetes/pkg/client/transport.dataFromSliceOrFile.
+// dataFromSliceOrFile returns data from the slice (if non-empty), or from the file,
+// or an error if an error occurred reading the file
+func dataFromSliceOrFile(data []byte, file string) ([]byte, error) {
+	if len(data) > 0 {
+		return data, nil
+	}
+	if len(file) > 0 {
+		fileData, err := ioutil.ReadFile(file)
+		if err != nil {
+			return []byte{}, err
+		}
+		return fileData, nil
+	}
+	return nil, nil
+}
+
+// rootCertPool is a modified copy of k8s.io/kubernetes/pkg/client/transport.rootCertPool.
+// rootCertPool returns nil if caData is empty. When passed along, this will mean "use system CAs".
+// When caData is not empty, it will be the ONLY information used in the CertPool.
+func rootCertPool(caData []byte) *x509.CertPool {
+	// What we really want is a copy of x509.systemRootsPool, but that isn't exposed. It's difficult to build (see the go
+	// code for a look at the platform specific insanity), so we'll use the fact that RootCAs == nil gives us the system values
+	// It doesn't allow trusting either/or, but hopefully that won't be an issue
+	if len(caData) == 0 {
+		return nil
+	}
+
+	// if we have caData, use it
+	certPool := x509.NewCertPool()
+	certPool.AppendCertsFromPEM(caData)
+	return certPool
+}
+
+// HasCA is a modified copy of k8s.io/kubernetes/pkg/client/transport.Config.HasCA.
+// HasCA returns whether the configuration has a certificate authority or not.
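One more aside before the remaining accessors: the rootCertPool contract above (nil pool means "use system CAs", a PEM bundle becomes the only trust root) in a standalone form. tlsConfigWithCA is a hypothetical helper, not the vendored API:

package main

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"io/ioutil"
)

// tlsConfigWithCA builds a tls.Config whose RootCAs is nil (system
// roots) when no CA file is given, or exactly the supplied PEM bundle.
func tlsConfigWithCA(caFile string, insecure bool) (*tls.Config, error) {
	cfg := &tls.Config{MinVersion: tls.VersionTLS10, InsecureSkipVerify: insecure}
	if caFile == "" {
		return cfg, nil // RootCAs stays nil => system CAs
	}
	pem, err := ioutil.ReadFile(caFile)
	if err != nil {
		return nil, err
	}
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(pem) {
		return nil, fmt.Errorf("no certificates parsed from %s", caFile)
	}
	cfg.RootCAs = pool
	return cfg, nil
}

func main() {
	cfg, err := tlsConfigWithCA("", false)
	if err != nil {
		panic(err)
	}
	fmt.Println("system roots:", cfg.RootCAs == nil)
}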
+func (c *restConfig) HasCA() bool {
+	return len(c.CAData) > 0 || len(c.CAFile) > 0
+}
+
+// HasCertAuth is a modified copy of k8s.io/kubernetes/pkg/client/transport.Config.HasCertAuth.
+// HasCertAuth returns whether the configuration has certificate authentication or not.
+func (c *restConfig) HasCertAuth() bool {
+	return len(c.CertData) != 0 || len(c.CertFile) != 0
+}
+
+// clientcmdConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.Config.
+// Config holds the information needed to connect to remote kubernetes clusters as a given user
+// IMPORTANT if you add fields to this struct, please update IsConfigEmpty()
+type clientcmdConfig struct {
+	// Clusters is a map of referenceable names to cluster configs
+	Clusters clustersMap `json:"clusters"`
+	// AuthInfos is a map of referenceable names to user configs
+	AuthInfos authInfosMap `json:"users"`
+	// Contexts is a map of referenceable names to context configs
+	Contexts contextsMap `json:"contexts"`
+	// CurrentContext is the name of the context that you would like to use by default
+	CurrentContext string `json:"current-context"`
+}
+
+type clustersMap map[string]*clientcmdCluster
+
+func (m *clustersMap) UnmarshalJSON(data []byte) error {
+	var a []v1NamedCluster
+	if err := json.Unmarshal(data, &a); err != nil {
+		return err
+	}
+	for _, e := range a {
+		cluster := e.Cluster // Allocates a new instance in each iteration
+		(*m)[e.Name] = &cluster
+	}
+	return nil
+}
+
+type authInfosMap map[string]*clientcmdAuthInfo
+
+func (m *authInfosMap) UnmarshalJSON(data []byte) error {
+	var a []v1NamedAuthInfo
+	if err := json.Unmarshal(data, &a); err != nil {
+		return err
+	}
+	for _, e := range a {
+		authInfo := e.AuthInfo // Allocates a new instance in each iteration
+		(*m)[e.Name] = &authInfo
+	}
+	return nil
+}
+
+type contextsMap map[string]*clientcmdContext
+
+func (m *contextsMap) UnmarshalJSON(data []byte) error {
+	var a []v1NamedContext
+	if err := json.Unmarshal(data, &a); err != nil {
+		return err
+	}
+	for _, e := range a {
+		context := e.Context // Allocates a new instance in each iteration
+		(*m)[e.Name] = &context
+	}
+	return nil
+}
+
+// clientcmdNewConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.NewConfig.
+// NewConfig is a convenience function that returns a new Config object with non-nil maps
+func clientcmdNewConfig() *clientcmdConfig {
+	return &clientcmdConfig{
+		Clusters:  make(map[string]*clientcmdCluster),
+		AuthInfos: make(map[string]*clientcmdAuthInfo),
+		Contexts:  make(map[string]*clientcmdContext),
+	}
+}
+
+// clientcmdCluster is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.Cluster.
+// Cluster contains information about how to communicate with a kubernetes cluster
+type clientcmdCluster struct {
+	// LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized.
+	LocationOfOrigin string
+	// Server is the address of the kubernetes cluster (https://hostname:port).
+	Server string `json:"server"`
+	// InsecureSkipTLSVerify skips the validity check for the server's certificate. This will make your HTTPS connections insecure.
+	InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify,omitempty"`
+	// CertificateAuthority is the path to a cert file for the certificate authority.
+	CertificateAuthority string `json:"certificate-authority,omitempty"`
+	// CertificateAuthorityData contains PEM-encoded certificate authority certificates. Overrides CertificateAuthority
+	CertificateAuthorityData []byte `json:"certificate-authority-data,omitempty"`
+}
+
+// clientcmdAuthInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.AuthInfo.
+// AuthInfo contains information that describes identity information. This is used to tell the kubernetes cluster who you are.
+type clientcmdAuthInfo struct {
+	// LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized.
+	LocationOfOrigin string
+	// ClientCertificate is the path to a client cert file for TLS.
+	ClientCertificate string `json:"client-certificate,omitempty"`
+	// ClientCertificateData contains PEM-encoded data from a client cert file for TLS. Overrides ClientCertificate
+	ClientCertificateData []byte `json:"client-certificate-data,omitempty"`
+	// ClientKey is the path to a client key file for TLS.
+	ClientKey string `json:"client-key,omitempty"`
+	// ClientKeyData contains PEM-encoded data from a client key file for TLS. Overrides ClientKey
+	ClientKeyData []byte `json:"client-key-data,omitempty"`
+	// Token is the bearer token for authentication to the kubernetes cluster.
+	Token string `json:"token,omitempty"`
+	// Username is the username for basic authentication to the kubernetes cluster.
+	Username string `json:"username,omitempty"`
+	// Password is the password for basic authentication to the kubernetes cluster.
+	Password string `json:"password,omitempty"`
+}
+
+// clientcmdContext is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.Context.
+// Context is a tuple of references to a cluster (how do I communicate with a kubernetes cluster), a user (how do I identify myself), and a namespace (what subset of resources do I want to work with)
+type clientcmdContext struct {
+	// LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized.
+	LocationOfOrigin string
+	// Cluster is the name of the cluster for this context
+	Cluster string `json:"cluster"`
+	// AuthInfo is the name of the authInfo for this context
+	AuthInfo string `json:"user"`
+	// Namespace is the default namespace to use on unspecified requests
+	Namespace string `json:"namespace,omitempty"`
+}
+
+// v1NamedCluster is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.v1.NamedCluster.
+// NamedCluster relates nicknames to cluster information
+type v1NamedCluster struct {
+	// Name is the nickname for this Cluster
+	Name string `json:"name"`
+	// Cluster holds the cluster information
+	Cluster clientcmdCluster `json:"cluster"`
+}
+
+// v1NamedContext is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.v1.NamedContext.
+// NamedContext relates nicknames to context information
+type v1NamedContext struct {
+	// Name is the nickname for this Context
+	Name string `json:"name"`
+	// Context holds the context information
+	Context clientcmdContext `json:"context"`
+}
+
+// v1NamedAuthInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.v1.NamedAuthInfo.
+// NamedAuthInfo relates nicknames to auth information +type v1NamedAuthInfo struct { + // Name is the nickname for this AuthInfo + Name string `json:"name"` + // AuthInfo holds the auth information + AuthInfo clientcmdAuthInfo `json:"user"` +} diff --git a/vendor/github.com/containers/image/openshift/openshift.go b/vendor/github.com/containers/image/openshift/openshift.go new file mode 100644 index 0000000..814c3ee --- /dev/null +++ b/vendor/github.com/containers/image/openshift/openshift.go @@ -0,0 +1,562 @@ +package openshift + +import ( + "bytes" + "context" + "crypto/rand" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" + + "github.com/containers/image/docker" + "github.com/containers/image/docker/reference" + "github.com/containers/image/manifest" + "github.com/containers/image/types" + "github.com/containers/image/version" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// openshiftClient is configuration for dealing with a single image stream, for reading or writing. +type openshiftClient struct { + ref openshiftReference + baseURL *url.URL + // Values from Kubernetes configuration + httpClient *http.Client + bearerToken string // "" if not used + username string // "" if not used + password string // if username != "" +} + +// newOpenshiftClient creates a new openshiftClient for the specified reference. +func newOpenshiftClient(ref openshiftReference) (*openshiftClient, error) { + // We have already done this parsing in ParseReference, but thrown away + // httpClient. So, parse again. + // (We could also rework/split restClientFor to "get base URL" to be done + // in ParseReference, and "get httpClient" to be done here. But until/unless + // we support non-default clusters, this is good enough.) + + // Overall, this is modelled on openshift/origin/pkg/cmd/util/clientcmd.New().ClientConfig() and openshift/origin/pkg/client. + cmdConfig := defaultClientConfig() + logrus.Debugf("cmdConfig: %#v", cmdConfig) + restConfig, err := cmdConfig.ClientConfig() + if err != nil { + return nil, err + } + // REMOVED: SetOpenShiftDefaults (values are not overridable in config files, so hard-coded these defaults.) + logrus.Debugf("restConfig: %#v", restConfig) + baseURL, httpClient, err := restClientFor(restConfig) + if err != nil { + return nil, err + } + logrus.Debugf("URL: %#v", *baseURL) + + if httpClient == nil { + httpClient = http.DefaultClient + } + + return &openshiftClient{ + ref: ref, + baseURL: baseURL, + httpClient: httpClient, + bearerToken: restConfig.BearerToken, + username: restConfig.Username, + password: restConfig.Password, + }, nil +} + +// doRequest performs a correctly authenticated request to a specified path, and returns response body or an error object. 
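+// A configured bearer token takes precedence over basic auth; transportNew above
+// guarantees that at most one of the two is set.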
+func (c *openshiftClient) doRequest(ctx context.Context, method, path string, requestBody []byte) ([]byte, error) { + url := *c.baseURL + url.Path = path + var requestBodyReader io.Reader + if requestBody != nil { + logrus.Debugf("Will send body: %s", requestBody) + requestBodyReader = bytes.NewReader(requestBody) + } + req, err := http.NewRequest(method, url.String(), requestBodyReader) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + + if len(c.bearerToken) != 0 { + req.Header.Set("Authorization", "Bearer "+c.bearerToken) + } else if len(c.username) != 0 { + req.SetBasicAuth(c.username, c.password) + } + req.Header.Set("Accept", "application/json, */*") + req.Header.Set("User-Agent", fmt.Sprintf("skopeo/%s", version.Version)) + if requestBody != nil { + req.Header.Set("Content-Type", "application/json") + } + + logrus.Debugf("%s %s", method, url.String()) + res, err := c.httpClient.Do(req) + if err != nil { + return nil, err + } + defer res.Body.Close() + body, err := ioutil.ReadAll(res.Body) + if err != nil { + return nil, err + } + logrus.Debugf("Got body: %s", body) + // FIXME: Just throwing this useful information away only to try to guess later... + logrus.Debugf("Got content-type: %s", res.Header.Get("Content-Type")) + + var status status + statusValid := false + if err := json.Unmarshal(body, &status); err == nil && len(status.Status) > 0 { + statusValid = true + } + + switch { + case res.StatusCode == http.StatusSwitchingProtocols: // FIXME?! No idea why this weird case exists in k8s.io/kubernetes/pkg/client/restclient. + if statusValid && status.Status != "Success" { + return nil, errors.New(status.Message) + } + case res.StatusCode >= http.StatusOK && res.StatusCode <= http.StatusPartialContent: + // OK. + default: + if statusValid { + return nil, errors.New(status.Message) + } + return nil, errors.Errorf("HTTP error: status code: %d (%s), body: %s", res.StatusCode, http.StatusText(res.StatusCode), string(body)) + } + + return body, nil +} + +// getImage loads the specified image object. +func (c *openshiftClient) getImage(ctx context.Context, imageStreamImageName string) (*image, error) { + // FIXME: validate components per validation.IsValidPathSegmentName? + path := fmt.Sprintf("/oapi/v1/namespaces/%s/imagestreamimages/%s@%s", c.ref.namespace, c.ref.stream, imageStreamImageName) + body, err := c.doRequest(ctx, "GET", path, nil) + if err != nil { + return nil, err + } + // Note: This does absolutely no kind/version checking or conversions. + var isi imageStreamImage + if err := json.Unmarshal(body, &isi); err != nil { + return nil, err + } + return &isi.Image, nil +} + +// convertDockerImageReference takes an image API DockerImageReference value and returns a reference we can actually use; +// currently OpenShift stores the cluster-internal service IPs here, which are unusable from the outside. 
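+// For example (hypothetical values), given the user-supplied reference
+// "registry.example.com/ns/stream:tag", a status entry such as
+// "172.30.0.1:5000/ns/stream@sha256:..." is rewritten to
+// "registry.example.com/ns/stream@sha256:...": only the registry host is replaced.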
+func (c *openshiftClient) convertDockerImageReference(ref string) (string, error) { + parts := strings.SplitN(ref, "/", 2) + if len(parts) != 2 { + return "", errors.Errorf("Invalid format of docker reference %s: missing '/'", ref) + } + return reference.Domain(c.ref.dockerReference) + "/" + parts[1], nil +} + +type openshiftImageSource struct { + client *openshiftClient + // Values specific to this image + sys *types.SystemContext + // State + docker types.ImageSource // The Docker Registry endpoint, or nil if not resolved yet + imageStreamImageName string // Resolved image identifier, or "" if not known yet +} + +// newImageSource creates a new ImageSource for the specified reference. +// The caller must call .Close() on the returned ImageSource. +func newImageSource(sys *types.SystemContext, ref openshiftReference) (types.ImageSource, error) { + client, err := newOpenshiftClient(ref) + if err != nil { + return nil, err + } + + return &openshiftImageSource{ + client: client, + sys: sys, + }, nil +} + +// Reference returns the reference used to set up this source, _as specified by the user_ +// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. +func (s *openshiftImageSource) Reference() types.ImageReference { + return s.client.ref +} + +// Close removes resources associated with an initialized ImageSource, if any. +func (s *openshiftImageSource) Close() error { + if s.docker != nil { + err := s.docker.Close() + s.docker = nil + + return err + } + + return nil +} + +// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). +// It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); +// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). +func (s *openshiftImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { + if err := s.ensureImageIsResolved(ctx); err != nil { + return nil, "", err + } + return s.docker.GetManifest(ctx, instanceDigest) +} + +// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. +func (s *openshiftImageSource) HasThreadSafeGetBlob() bool { + return false +} + +// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). +// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. +// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. +func (s *openshiftImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { + if err := s.ensureImageIsResolved(ctx); err != nil { + return nil, 0, err + } + return s.docker.GetBlob(ctx, info, cache) +} + +// GetSignatures returns the image's signatures. It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). 
+func (s *openshiftImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { + var imageName string + if instanceDigest == nil { + if err := s.ensureImageIsResolved(ctx); err != nil { + return nil, err + } + imageName = s.imageStreamImageName + } else { + imageName = instanceDigest.String() + } + image, err := s.client.getImage(ctx, imageName) + if err != nil { + return nil, err + } + var sigs [][]byte + for _, sig := range image.Signatures { + if sig.Type == imageSignatureTypeAtomic { + sigs = append(sigs, sig.Content) + } + } + return sigs, nil +} + +// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified. +func (s *openshiftImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) { + return nil, nil +} + +// ensureImageIsResolved sets up s.docker and s.imageStreamImageName +func (s *openshiftImageSource) ensureImageIsResolved(ctx context.Context) error { + if s.docker != nil { + return nil + } + + // FIXME: validate components per validation.IsValidPathSegmentName? + path := fmt.Sprintf("/oapi/v1/namespaces/%s/imagestreams/%s", s.client.ref.namespace, s.client.ref.stream) + body, err := s.client.doRequest(ctx, "GET", path, nil) + if err != nil { + return err + } + // Note: This does absolutely no kind/version checking or conversions. + var is imageStream + if err := json.Unmarshal(body, &is); err != nil { + return err + } + var te *tagEvent + for _, tag := range is.Status.Tags { + if tag.Tag != s.client.ref.dockerReference.Tag() { + continue + } + if len(tag.Items) > 0 { + te = &tag.Items[0] + break + } + } + if te == nil { + return errors.Errorf("No matching tag found") + } + logrus.Debugf("tag event %#v", te) + dockerRefString, err := s.client.convertDockerImageReference(te.DockerImageReference) + if err != nil { + return err + } + logrus.Debugf("Resolved reference %#v", dockerRefString) + dockerRef, err := docker.ParseReference("//" + dockerRefString) + if err != nil { + return err + } + d, err := dockerRef.NewImageSource(ctx, s.sys) + if err != nil { + return err + } + s.docker = d + s.imageStreamImageName = te.Image + return nil +} + +type openshiftImageDestination struct { + client *openshiftClient + docker types.ImageDestination // The Docker Registry endpoint + // State + imageStreamImageName string // "" if not yet known +} + +// newImageDestination creates a new ImageDestination for the specified reference. +func newImageDestination(ctx context.Context, sys *types.SystemContext, ref openshiftReference) (types.ImageDestination, error) { + client, err := newOpenshiftClient(ref) + if err != nil { + return nil, err + } + + // FIXME: Should this always use a digest, not a tag? Uploading to Docker by tag requires the tag _inside_ the manifest to match, + // i.e. a single signed image cannot be available under multiple tags. But with types.ImageDestination, we don't know + // the manifest digest at this point. + dockerRefString := fmt.Sprintf("//%s/%s/%s:%s", reference.Domain(client.ref.dockerReference), client.ref.namespace, client.ref.stream, client.ref.dockerReference.Tag()) + dockerRef, err := docker.ParseReference(dockerRefString) + if err != nil { + return nil, err + } + docker, err := dockerRef.NewImageDestination(ctx, sys) + if err != nil { + return nil, err + } + + return &openshiftImageDestination{ + client: client, + docker: docker, + }, nil +} + +// Reference returns the reference used to set up this destination. 
Note that this should directly correspond to user's intent,
+// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
+func (d *openshiftImageDestination) Reference() types.ImageReference {
+	return d.client.ref
+}
+
+// Close removes resources associated with an initialized ImageDestination, if any.
+func (d *openshiftImageDestination) Close() error {
+	return d.docker.Close()
+}
+
+func (d *openshiftImageDestination) SupportedManifestMIMETypes() []string {
+	return d.docker.SupportedManifestMIMETypes()
+}
+
+// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
+// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
+func (d *openshiftImageDestination) SupportsSignatures(ctx context.Context) error {
+	return nil
+}
+
+func (d *openshiftImageDestination) DesiredLayerCompression() types.LayerCompression {
+	return types.Compress
+}
+
+// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
+// uploaded to the image destination, true otherwise.
+func (d *openshiftImageDestination) AcceptsForeignLayerURLs() bool {
+	return true
+}
+
+// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise.
+func (d *openshiftImageDestination) MustMatchRuntimeOS() bool {
+	return false
+}
+
+// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
+// and would prefer to receive an unmodified manifest instead of one modified for the destination.
+// Does not make a difference if Reference().DockerReference() is nil.
+func (d *openshiftImageDestination) IgnoresEmbeddedDockerReference() bool {
+	return d.docker.IgnoresEmbeddedDockerReference()
+}
+
+// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
+func (d *openshiftImageDestination) HasThreadSafePutBlob() bool {
+	return false
+}
+
+// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
+// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
+// inputInfo.Size is the expected length of stream, if known.
+// May update cache.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+func (d *openshiftImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
+	return d.docker.PutBlob(ctx, stream, inputInfo, cache, isConfig)
+}
+
+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
+func (d *openshiftImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
+	return d.docker.TryReusingBlob(ctx, info, cache, canSubstitute)
+}
+
+// PutManifest writes manifest to the destination.
+// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
+// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
+// but may accept a different manifest type, the returned error must be a ManifestTypeRejectedError.
+func (d *openshiftImageDestination) PutManifest(ctx context.Context, m []byte) error {
+	manifestDigest, err := manifest.Digest(m)
+	if err != nil {
+		return err
+	}
+	d.imageStreamImageName = manifestDigest.String()
+
+	return d.docker.PutManifest(ctx, m)
+}
+
+func (d *openshiftImageDestination) PutSignatures(ctx context.Context, signatures [][]byte) error {
+	if d.imageStreamImageName == "" {
+		return errors.Errorf("Internal error: Unknown manifest digest, can't add signatures")
+	}
+	// Because image signatures are a shared resource in Atomic Registry, the default upload
+	// always adds signatures. Eventually we should also allow removing signatures.
+
+	if len(signatures) == 0 {
+		return nil // No need to even read the old state.
+	}
+
+	image, err := d.client.getImage(ctx, d.imageStreamImageName)
+	if err != nil {
+		return err
+	}
+	existingSigNames := map[string]struct{}{}
+	for _, sig := range image.Signatures {
+		existingSigNames[sig.objectMeta.Name] = struct{}{}
+	}
+
+sigExists:
+	for _, newSig := range signatures {
+		for _, existingSig := range image.Signatures {
+			if existingSig.Type == imageSignatureTypeAtomic && bytes.Equal(existingSig.Content, newSig) {
+				continue sigExists
+			}
+		}
+
+		// The API expects us to invent a new unique name. This is racy, but hopefully good enough.
+		var signatureName string
+		for {
+			randBytes := make([]byte, 16)
+			n, err := rand.Read(randBytes)
+			if err != nil || n != 16 {
+				return errors.Wrapf(err, "Error generating random signature len %d", n)
+			}
+			signatureName = fmt.Sprintf("%s@%032x", d.imageStreamImageName, randBytes)
+			if _, ok := existingSigNames[signatureName]; !ok {
+				break
+			}
+		}
+		// Note: This does absolutely no kind/version checking or conversions.
+		sig := imageSignature{
+			typeMeta: typeMeta{
+				Kind:       "ImageSignature",
+				APIVersion: "v1",
+			},
+			objectMeta: objectMeta{Name: signatureName},
+			Type:       imageSignatureTypeAtomic,
+			Content:    newSig,
+		}
+		body, err := json.Marshal(sig)
+		if err != nil {
+			return err
+		}
+		_, err = d.client.doRequest(ctx, "POST", "/oapi/v1/imagesignatures", body)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// Commit marks the process of storing the image as successful and asks for the image to be persisted.
+// WARNING: This does not have any transactional semantics:
+// - Uploaded data MAY be visible to others before Commit() is called
+// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e.
rollback is allowed but not guaranteed) +func (d *openshiftImageDestination) Commit(ctx context.Context) error { + return d.docker.Commit(ctx) +} + +// These structs are subsets of github.com/openshift/origin/pkg/image/api/v1 and its dependencies. +type imageStream struct { + Status imageStreamStatus `json:"status,omitempty"` +} +type imageStreamStatus struct { + DockerImageRepository string `json:"dockerImageRepository"` + Tags []namedTagEventList `json:"tags,omitempty"` +} +type namedTagEventList struct { + Tag string `json:"tag"` + Items []tagEvent `json:"items"` +} +type tagEvent struct { + DockerImageReference string `json:"dockerImageReference"` + Image string `json:"image"` +} +type imageStreamImage struct { + Image image `json:"image"` +} +type image struct { + objectMeta `json:"metadata,omitempty"` + DockerImageReference string `json:"dockerImageReference,omitempty"` + // DockerImageMetadata runtime.RawExtension `json:"dockerImageMetadata,omitempty"` + DockerImageMetadataVersion string `json:"dockerImageMetadataVersion,omitempty"` + DockerImageManifest string `json:"dockerImageManifest,omitempty"` + // DockerImageLayers []ImageLayer `json:"dockerImageLayers"` + Signatures []imageSignature `json:"signatures,omitempty"` +} + +const imageSignatureTypeAtomic string = "atomic" + +type imageSignature struct { + typeMeta `json:",inline"` + objectMeta `json:"metadata,omitempty"` + Type string `json:"type"` + Content []byte `json:"content"` + // Conditions []SignatureCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` + // ImageIdentity string `json:"imageIdentity,omitempty"` + // SignedClaims map[string]string `json:"signedClaims,omitempty"` + // Created *unversioned.Time `json:"created,omitempty"` + // IssuedBy SignatureIssuer `json:"issuedBy,omitempty"` + // IssuedTo SignatureSubject `json:"issuedTo,omitempty"` +} +type typeMeta struct { + Kind string `json:"kind,omitempty"` + APIVersion string `json:"apiVersion,omitempty"` +} +type objectMeta struct { + Name string `json:"name,omitempty"` + GenerateName string `json:"generateName,omitempty"` + Namespace string `json:"namespace,omitempty"` + SelfLink string `json:"selfLink,omitempty"` + ResourceVersion string `json:"resourceVersion,omitempty"` + Generation int64 `json:"generation,omitempty"` + DeletionGracePeriodSeconds *int64 `json:"deletionGracePeriodSeconds,omitempty"` + Labels map[string]string `json:"labels,omitempty"` + Annotations map[string]string `json:"annotations,omitempty"` +} + +// A subset of k8s.io/kubernetes/pkg/api/unversioned/Status +type status struct { + Status string `json:"status,omitempty"` + Message string `json:"message,omitempty"` + // Reason StatusReason `json:"reason,omitempty"` + // Details *StatusDetails `json:"details,omitempty"` + Code int32 `json:"code,omitempty"` +} diff --git a/vendor/github.com/containers/image/openshift/openshift_transport.go b/vendor/github.com/containers/image/openshift/openshift_transport.go new file mode 100644 index 0000000..b27867a --- /dev/null +++ b/vendor/github.com/containers/image/openshift/openshift_transport.go @@ -0,0 +1,157 @@ +package openshift + +import ( + "context" + "fmt" + "regexp" + "strings" + + "github.com/containers/image/docker/policyconfiguration" + "github.com/containers/image/docker/reference" + genericImage "github.com/containers/image/image" + "github.com/containers/image/transports" + "github.com/containers/image/types" + "github.com/pkg/errors" +) + +func init() { + transports.Register(Transport) +} + +// Transport is an 
ImageTransport for OpenShift registry-hosted images. +var Transport = openshiftTransport{} + +type openshiftTransport struct{} + +func (t openshiftTransport) Name() string { + return "atomic" +} + +// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. +func (t openshiftTransport) ParseReference(reference string) (types.ImageReference, error) { + return ParseReference(reference) +} + +// Note that imageNameRegexp is namespace/stream:tag, this +// is HOSTNAME/namespace/stream:tag or parent prefixes. +// Keep this in sync with imageNameRegexp! +var scopeRegexp = regexp.MustCompile("^[^/]*(/[^:/]*(/[^:/]*(:[^:/]*)?)?)?$") + +// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys +// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). +// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion. +// scope passed to this function will not be "", that value is always allowed. +func (t openshiftTransport) ValidatePolicyConfigurationScope(scope string) error { + if scopeRegexp.FindStringIndex(scope) == nil { + return errors.Errorf("Invalid scope name %s", scope) + } + return nil +} + +// openshiftReference is an ImageReference for OpenShift images. +type openshiftReference struct { + dockerReference reference.NamedTagged + namespace string // Computed from dockerReference in advance. + stream string // Computed from dockerReference in advance. +} + +// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an OpenShift ImageReference. +func ParseReference(ref string) (types.ImageReference, error) { + r, err := reference.ParseNormalizedNamed(ref) + if err != nil { + return nil, errors.Wrapf(err, "failed to parse image reference %q", ref) + } + tagged, ok := r.(reference.NamedTagged) + if !ok { + return nil, errors.Errorf("invalid image reference %s, expected format: 'hostname/namespace/stream:tag'", ref) + } + return NewReference(tagged) +} + +// NewReference returns an OpenShift reference for a reference.NamedTagged +func NewReference(dockerRef reference.NamedTagged) (types.ImageReference, error) { + r := strings.SplitN(reference.Path(dockerRef), "/", 3) + if len(r) != 2 { + return nil, errors.Errorf("invalid image reference: %s, expected format: 'hostname/namespace/stream:tag'", + reference.FamiliarString(dockerRef)) + } + return openshiftReference{ + namespace: r[0], + stream: r[1], + dockerReference: dockerRef, + }, nil +} + +func (ref openshiftReference) Transport() types.ImageTransport { + return Transport +} + +// StringWithinTransport returns a string representation of the reference, which MUST be such that +// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. +// NOTE: The returned string is not promised to be equal to the original input to ParseReference; +// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. +// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix. +func (ref openshiftReference) StringWithinTransport() string { + return reference.FamiliarString(ref.dockerReference) +} + +// DockerReference returns a Docker reference associated with this reference +// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, +// not e.g. 
after redirect or alias processing), or nil if unknown/not applicable. +func (ref openshiftReference) DockerReference() reference.Named { + return ref.dockerReference +} + +// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. +// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases; +// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical +// (i.e. various references with exactly the same semantics should return the same configuration identity) +// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but +// not required/guaranteed that it will be a valid input to Transport().ParseReference(). +// Returns "" if configuration identities for these references are not supported. +func (ref openshiftReference) PolicyConfigurationIdentity() string { + res, err := policyconfiguration.DockerReferenceIdentity(ref.dockerReference) + if res == "" || err != nil { // Coverage: Should never happen, NewReference constructs a valid tagged reference. + panic(fmt.Sprintf("Internal inconsistency: policyconfiguration.DockerReferenceIdentity returned %#v, %v", res, err)) + } + return res +} + +// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search +// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed +// in order, terminating on first match, and an implicit "" is always checked at the end. +// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), +// and each following element to be a prefix of the element preceding it. +func (ref openshiftReference) PolicyConfigurationNamespaces() []string { + return policyconfiguration.DockerReferenceNamespaces(ref.dockerReference) +} + +// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. +// The caller must call .Close() on the returned ImageCloser. +// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, +// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. +// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. +func (ref openshiftReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { + src, err := newImageSource(sys, ref) + if err != nil { + return nil, err + } + return genericImage.FromSource(ctx, sys, src) +} + +// NewImageSource returns a types.ImageSource for this reference. +// The caller must call .Close() on the returned ImageSource. +func (ref openshiftReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { + return newImageSource(sys, ref) +} + +// NewImageDestination returns a types.ImageDestination for this reference. +// The caller must call .Close() on the returned ImageDestination. +func (ref openshiftReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { + return newImageDestination(ctx, sys, ref) +} + +// DeleteImage deletes the named image from the registry, if supported. 
+func (ref openshiftReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
+	return errors.Errorf("Deleting images not implemented for atomic: images")
+}
diff --git a/vendor/github.com/containers/image/ostree/ostree_dest.go b/vendor/github.com/containers/image/ostree/ostree_dest.go
new file mode 100644
index 0000000..d69f4fa
--- /dev/null
+++ b/vendor/github.com/containers/image/ostree/ostree_dest.go
@@ -0,0 +1,504 @@
+// +build !containers_image_ostree_stub
+
+package ostree
+
+import (
+	"bytes"
+	"context"
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"runtime"
+	"strconv"
+	"strings"
+	"syscall"
+	"time"
+	"unsafe"
+
+	"github.com/containers/image/manifest"
+	"github.com/containers/image/types"
+	"github.com/containers/storage/pkg/archive"
+	"github.com/klauspost/pgzip"
+	"github.com/opencontainers/go-digest"
+	selinux "github.com/opencontainers/selinux/go-selinux"
+	"github.com/ostreedev/ostree-go/pkg/otbuiltin"
+	"github.com/pkg/errors"
+	"github.com/vbatts/tar-split/tar/asm"
+	"github.com/vbatts/tar-split/tar/storage"
+)
+
+// #cgo pkg-config: glib-2.0 gobject-2.0 ostree-1 libselinux
+// #include <glib.h>
+// #include <glib-object.h>
+// #include <gio/gio.h>
+// #include <stdlib.h>
+// #include <ostree.h>
+// #include <gio/ginputstream.h>
+// #include <selinux/selinux.h>
+// #include <selinux/label.h>
+import "C"
+
+type blobToImport struct {
+	Size     int64
+	Digest   digest.Digest
+	BlobPath string
+}
+
+type descriptor struct {
+	Size   int64         `json:"size"`
+	Digest digest.Digest `json:"digest"`
+}
+
+type fsLayersSchema1 struct {
+	BlobSum digest.Digest `json:"blobSum"`
+}
+
+type manifestSchema struct {
+	LayersDescriptors []descriptor      `json:"layers"`
+	FSLayers          []fsLayersSchema1 `json:"fsLayers"`
+}
+
+type ostreeImageDestination struct {
+	ref           ostreeReference
+	manifest      string
+	schema        manifestSchema
+	tmpDirPath    string
+	blobs         map[string]*blobToImport
+	digest        digest.Digest
+	signaturesLen int
+	repo          *C.struct_OstreeRepo
+}
+
+// newImageDestination returns an ImageDestination for writing to an existing ostree repository.
+func newImageDestination(ref ostreeReference, tmpDirPath string) (types.ImageDestination, error) {
+	tmpDirPath = filepath.Join(tmpDirPath, ref.branchName)
+	if err := ensureDirectoryExists(tmpDirPath); err != nil {
+		return nil, err
+	}
+	return &ostreeImageDestination{ref, "", manifestSchema{}, tmpDirPath, map[string]*blobToImport{}, "", 0, nil}, nil
+}
+
+// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
+// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
+func (d *ostreeImageDestination) Reference() types.ImageReference {
+	return d.ref
+}
+
+// Close removes resources associated with an initialized ImageDestination, if any.
+func (d *ostreeImageDestination) Close() error {
+	if d.repo != nil {
+		C.g_object_unref(C.gpointer(d.repo))
+	}
+	return os.RemoveAll(d.tmpDirPath)
+}
+
+func (d *ostreeImageDestination) SupportedManifestMIMETypes() []string {
+	return []string{
+		manifest.DockerV2Schema2MediaType,
+	}
+}
+
+// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
+// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
+func (d *ostreeImageDestination) SupportsSignatures(ctx context.Context) error {
+	return nil
+}
+
+// DesiredLayerCompression indicates the kind of compression to apply to layer blobs written to this destination.
+func (d *ostreeImageDestination) DesiredLayerCompression() types.LayerCompression { + return types.PreserveOriginal +} + +// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually +// uploaded to the image destination, true otherwise. +func (d *ostreeImageDestination) AcceptsForeignLayerURLs() bool { + return false +} + +// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise. +func (d *ostreeImageDestination) MustMatchRuntimeOS() bool { + return true +} + +// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(), +// and would prefer to receive an unmodified manifest instead of one modified for the destination. +// Does not make a difference if Reference().DockerReference() is nil. +func (d *ostreeImageDestination) IgnoresEmbeddedDockerReference() bool { + return false // N/A, DockerReference() returns nil. +} + +// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. +func (d *ostreeImageDestination) HasThreadSafePutBlob() bool { + return false +} + +// PutBlob writes contents of stream and returns data representing the result. +// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. +// inputInfo.Size is the expected length of stream, if known. +// inputInfo.MediaType describes the blob format, if known. +// May update cache. +// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available +// to any other readers for download using the supplied digest. +// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. +func (d *ostreeImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { + tmpDir, err := ioutil.TempDir(d.tmpDirPath, "blob") + if err != nil { + return types.BlobInfo{}, err + } + + blobPath := filepath.Join(tmpDir, "content") + blobFile, err := os.Create(blobPath) + if err != nil { + return types.BlobInfo{}, err + } + defer blobFile.Close() + + digester := digest.Canonical.Digester() + tee := io.TeeReader(stream, digester.Hash()) + + // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). 
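+	// Draining the TeeReader below writes the blob to disk and feeds the digester
+	// in a single pass, so the canonical digest is known as soon as the copy ends.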
+ size, err := io.Copy(blobFile, tee) + if err != nil { + return types.BlobInfo{}, err + } + computedDigest := digester.Digest() + if inputInfo.Size != -1 && size != inputInfo.Size { + return types.BlobInfo{}, errors.Errorf("Size mismatch when copying %s, expected %d, got %d", computedDigest, inputInfo.Size, size) + } + if err := blobFile.Sync(); err != nil { + return types.BlobInfo{}, err + } + + hash := computedDigest.Hex() + d.blobs[hash] = &blobToImport{Size: size, Digest: computedDigest, BlobPath: blobPath} + return types.BlobInfo{Digest: computedDigest, Size: size}, nil +} + +func fixFiles(selinuxHnd *C.struct_selabel_handle, root string, dir string, usermode bool) error { + entries, err := ioutil.ReadDir(dir) + if err != nil { + return err + } + + for _, info := range entries { + fullpath := filepath.Join(dir, info.Name()) + if info.Mode()&(os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 { + if err := os.Remove(fullpath); err != nil { + return err + } + continue + } + + if selinuxHnd != nil { + relPath, err := filepath.Rel(root, fullpath) + if err != nil { + return err + } + // Handle /exports/hostfs as a special case. Files under this directory are copied to the host, + // thus we benefit from maintaining the same SELinux label they would have on the host as we could + // use hard links instead of copying the files. + relPath = fmt.Sprintf("/%s", strings.TrimPrefix(relPath, "exports/hostfs/")) + + relPathC := C.CString(relPath) + defer C.free(unsafe.Pointer(relPathC)) + var context *C.char + + res, err := C.selabel_lookup_raw(selinuxHnd, &context, relPathC, C.int(info.Mode()&os.ModePerm)) + if int(res) < 0 && err != syscall.ENOENT { + return errors.Wrapf(err, "cannot selabel_lookup_raw %s", relPath) + } + if int(res) == 0 { + defer C.freecon(context) + fullpathC := C.CString(fullpath) + defer C.free(unsafe.Pointer(fullpathC)) + res, err = C.lsetfilecon_raw(fullpathC, context) + if int(res) < 0 { + return errors.Wrapf(err, "cannot setfilecon_raw %s", fullpath) + } + } + } + + if info.IsDir() { + if usermode { + if err := os.Chmod(fullpath, info.Mode()|0700); err != nil { + return err + } + } + err = fixFiles(selinuxHnd, root, fullpath, usermode) + if err != nil { + return err + } + } else if usermode && (info.Mode().IsRegular()) { + if err := os.Chmod(fullpath, info.Mode()|0600); err != nil { + return err + } + } + } + + return nil +} + +func (d *ostreeImageDestination) ostreeCommit(repo *otbuiltin.Repo, branch string, root string, metadata []string) error { + opts := otbuiltin.NewCommitOptions() + opts.AddMetadataString = metadata + opts.Timestamp = time.Now() + // OCI layers have no parent OSTree commit + opts.Parent = "0000000000000000000000000000000000000000000000000000000000000000" + _, err := repo.Commit(root, branch, opts) + return err +} + +func generateTarSplitMetadata(output *bytes.Buffer, file string) (digest.Digest, int64, error) { + mfz := pgzip.NewWriter(output) + defer mfz.Close() + metaPacker := storage.NewJSONPacker(mfz) + + stream, err := os.OpenFile(file, os.O_RDONLY, 0) + if err != nil { + return "", -1, err + } + defer stream.Close() + + gzReader, err := archive.DecompressStream(stream) + if err != nil { + return "", -1, err + } + defer gzReader.Close() + + its, err := asm.NewInputTarStream(gzReader, metaPacker, nil) + if err != nil { + return "", -1, err + } + + digester := digest.Canonical.Digester() + + written, err := io.Copy(digester.Hash(), its) + if err != nil { + return "", -1, err + } + + return digester.Digest(), written, nil +} + +func (d 
*ostreeImageDestination) importBlob(selinuxHnd *C.struct_selabel_handle, repo *otbuiltin.Repo, blob *blobToImport) error {
+	// TODO: This can take quite some time, and should ideally be cancellable using a context.Context.
+
+	ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Hex())
+	destinationPath := filepath.Join(d.tmpDirPath, blob.Digest.Hex(), "root")
+	if err := ensureDirectoryExists(destinationPath); err != nil {
+		return err
+	}
+	defer func() {
+		os.Remove(blob.BlobPath)
+		os.RemoveAll(destinationPath)
+	}()
+
+	var tarSplitOutput bytes.Buffer
+	uncompressedDigest, uncompressedSize, err := generateTarSplitMetadata(&tarSplitOutput, blob.BlobPath)
+	if err != nil {
+		return err
+	}
+
+	if os.Getuid() == 0 {
+		if err := archive.UntarPath(blob.BlobPath, destinationPath); err != nil {
+			return err
+		}
+		if err := fixFiles(selinuxHnd, destinationPath, destinationPath, false); err != nil {
+			return err
+		}
+	} else {
+		os.MkdirAll(destinationPath, 0755)
+		if err := exec.Command("tar", "-C", destinationPath, "--no-same-owner", "--no-same-permissions", "--delay-directory-restore", "-xf", blob.BlobPath).Run(); err != nil {
+			return err
+		}
+
+		if err := fixFiles(selinuxHnd, destinationPath, destinationPath, true); err != nil {
+			return err
+		}
+	}
+	return d.ostreeCommit(repo, ostreeBranch, destinationPath, []string{fmt.Sprintf("docker.size=%d", blob.Size),
+		fmt.Sprintf("docker.uncompressed_size=%d", uncompressedSize),
+		fmt.Sprintf("docker.uncompressed_digest=%s", uncompressedDigest.String()),
+		fmt.Sprintf("tarsplit.output=%s", base64.StdEncoding.EncodeToString(tarSplitOutput.Bytes()))})
+
+}
+
+func (d *ostreeImageDestination) importConfig(repo *otbuiltin.Repo, blob *blobToImport) error {
+	ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Hex())
+	destinationPath := filepath.Dir(blob.BlobPath)
+
+	return d.ostreeCommit(repo, ostreeBranch, destinationPath, []string{fmt.Sprintf("docker.size=%d", blob.Size)})
+}
+
+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
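+// For this transport the check is purely metadata-based: it only looks up the docker.*
+// keys recorded on the blob's ociimage/ branch and never reads blob contents.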
+func (d *ostreeImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
+	if d.repo == nil {
+		repo, err := openRepo(d.ref.repo)
+		if err != nil {
+			return false, types.BlobInfo{}, err
+		}
+		d.repo = repo
+	}
+	branch := fmt.Sprintf("ociimage/%s", info.Digest.Hex())
+
+	found, data, err := readMetadata(d.repo, branch, "docker.uncompressed_digest")
+	if err != nil || !found {
+		return found, types.BlobInfo{}, err
+	}
+
+	found, data, err = readMetadata(d.repo, branch, "docker.uncompressed_size")
+	if err != nil || !found {
+		return found, types.BlobInfo{}, err
+	}
+
+	found, data, err = readMetadata(d.repo, branch, "docker.size")
+	if err != nil || !found {
+		return found, types.BlobInfo{}, err
+	}
+
+	size, err := strconv.ParseInt(data, 10, 64)
+	if err != nil {
+		return false, types.BlobInfo{}, err
+	}
+
+	return true, types.BlobInfo{Digest: info.Digest, Size: size}, nil
+}
+
+// PutManifest writes manifest to the destination.
+// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
+// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
+// but may accept a different manifest type, the returned error must be a ManifestTypeRejectedError.
+func (d *ostreeImageDestination) PutManifest(ctx context.Context, manifestBlob []byte) error {
+	d.manifest = string(manifestBlob)
+
+	if err := json.Unmarshal(manifestBlob, &d.schema); err != nil {
+		return err
+	}
+
+	manifestPath := filepath.Join(d.tmpDirPath, d.ref.manifestPath())
+	if err := ensureParentDirectoryExists(manifestPath); err != nil {
+		return err
+	}
+
+	digest, err := manifest.Digest(manifestBlob)
+	if err != nil {
+		return err
+	}
+	d.digest = digest
+
+	return ioutil.WriteFile(manifestPath, manifestBlob, 0644)
+}
+
+func (d *ostreeImageDestination) PutSignatures(ctx context.Context, signatures [][]byte) error {
+	path := filepath.Join(d.tmpDirPath, d.ref.signaturePath(0))
+	if err := ensureParentDirectoryExists(path); err != nil {
+		return err
+	}
+
+	for i, sig := range signatures {
+		signaturePath := filepath.Join(d.tmpDirPath, d.ref.signaturePath(i))
+		if err := ioutil.WriteFile(signaturePath, sig, 0644); err != nil {
+			return err
+		}
+	}
+	d.signaturesLen = len(signatures)
+	return nil
+}
+
+func (d *ostreeImageDestination) Commit(ctx context.Context) error {
+	runtime.LockOSThread()
+	defer runtime.UnlockOSThread()
+
+	repo, err := otbuiltin.OpenRepo(d.ref.repo)
+	if err != nil {
+		return err
+	}
+
+	_, err = repo.PrepareTransaction()
+	if err != nil {
+		return err
+	}
+
+	var selinuxHnd *C.struct_selabel_handle
+
+	if os.Getuid() == 0 && selinux.GetEnabled() {
+		selinuxHnd, err = C.selabel_open(C.SELABEL_CTX_FILE, nil, 0)
+		if selinuxHnd == nil {
+			return errors.Wrapf(err, "cannot open the SELinux DB")
+		}
+
+		defer C.selabel_close(selinuxHnd)
+	}
+
+	checkLayer := func(hash string) error {
+		blob := d.blobs[hash]
+		// if the blob is not present in d.blobs then it is already stored in OSTree,
+		// and we don't need to import it.
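+		// (d.blobs only holds blobs that were staged through PutBlob during this copy.)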
+		if blob == nil {
+			return nil
+		}
+		err := d.importBlob(selinuxHnd, repo, blob)
+		if err != nil {
+			return err
+		}
+
+		delete(d.blobs, hash)
+		return nil
+	}
+	for _, layer := range d.schema.LayersDescriptors {
+		hash := layer.Digest.Hex()
+		if err = checkLayer(hash); err != nil {
+			return err
+		}
+	}
+	for _, layer := range d.schema.FSLayers {
+		hash := layer.BlobSum.Hex()
+		if err = checkLayer(hash); err != nil {
+			return err
+		}
+	}
+
+	// Import the other blobs that are not layers
+	for _, blob := range d.blobs {
+		err := d.importConfig(repo, blob)
+		if err != nil {
+			return err
+		}
+	}
+
+	manifestPath := filepath.Join(d.tmpDirPath, "manifest")
+
+	metadata := []string{fmt.Sprintf("docker.manifest=%s", string(d.manifest)),
+		fmt.Sprintf("signatures=%d", d.signaturesLen),
+		fmt.Sprintf("docker.digest=%s", string(d.digest))}
+	if err := d.ostreeCommit(repo, fmt.Sprintf("ociimage/%s", d.ref.branchName), manifestPath, metadata); err != nil {
+		return err
+	}
+
+	_, err = repo.CommitTransaction()
+	return err
+}
+
+func ensureDirectoryExists(path string) error {
+	if _, err := os.Stat(path); err != nil && os.IsNotExist(err) {
+		if err := os.MkdirAll(path, 0755); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func ensureParentDirectoryExists(path string) error {
+	return ensureDirectoryExists(filepath.Dir(path))
+}
diff --git a/vendor/github.com/containers/image/ostree/ostree_src.go b/vendor/github.com/containers/image/ostree/ostree_src.go
new file mode 100644
index 0000000..35d8521
--- /dev/null
+++ b/vendor/github.com/containers/image/ostree/ostree_src.go
@@ -0,0 +1,410 @@
+// +build !containers_image_ostree_stub
+
+package ostree
+
+import (
+	"bytes"
+	"context"
+	"encoding/base64"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"strconv"
+	"strings"
+	"unsafe"
+
+	"github.com/containers/image/manifest"
+	"github.com/containers/image/types"
+	"github.com/containers/storage/pkg/ioutils"
+	"github.com/klauspost/pgzip"
+	digest "github.com/opencontainers/go-digest"
+	glib "github.com/ostreedev/ostree-go/pkg/glibobject"
+	"github.com/pkg/errors"
+	"github.com/vbatts/tar-split/tar/asm"
+	"github.com/vbatts/tar-split/tar/storage"
+)
+
+// #cgo pkg-config: glib-2.0 gobject-2.0 ostree-1
+// #include <glib.h>
+// #include <glib-object.h>
+// #include <gio/gio.h>
+// #include <stdlib.h>
+// #include <ostree.h>
+// #include <gio/ginputstream.h>
+import "C"
+
+type ostreeImageSource struct {
+	ref    ostreeReference
+	tmpDir string
+	repo   *C.struct_OstreeRepo
+	// get the compressed layer by its uncompressed checksum
+	compressed map[digest.Digest]digest.Digest
+}
+
+// newImageSource returns an ImageSource for reading from an existing ostree repository.
+func newImageSource(tmpDir string, ref ostreeReference) (types.ImageSource, error) {
+	return &ostreeImageSource{ref: ref, tmpDir: tmpDir, compressed: nil}, nil
+}
+
+// Reference returns the reference used to set up this source.
+func (s *ostreeImageSource) Reference() types.ImageReference {
+	return s.ref
+}
+
+// Close removes resources associated with an initialized ImageSource, if any.
+func (s *ostreeImageSource) Close() error { + if s.repo != nil { + C.g_object_unref(C.gpointer(s.repo)) + } + return nil +} + +func (s *ostreeImageSource) getLayerSize(blob string) (int64, error) { + b := fmt.Sprintf("ociimage/%s", blob) + found, data, err := readMetadata(s.repo, b, "docker.size") + if err != nil || !found { + return 0, err + } + return strconv.ParseInt(data, 10, 64) +} + +func (s *ostreeImageSource) getLenSignatures() (int64, error) { + b := fmt.Sprintf("ociimage/%s", s.ref.branchName) + found, data, err := readMetadata(s.repo, b, "signatures") + if err != nil { + return -1, err + } + if !found { + // if 'signatures' is not present, just return 0 signatures. + return 0, nil + } + return strconv.ParseInt(data, 10, 64) +} + +func (s *ostreeImageSource) getTarSplitData(blob string) ([]byte, error) { + b := fmt.Sprintf("ociimage/%s", blob) + found, out, err := readMetadata(s.repo, b, "tarsplit.output") + if err != nil || !found { + return nil, err + } + return base64.StdEncoding.DecodeString(out) +} + +// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). +// It may use a remote (= slow) service. +func (s *ostreeImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { + if instanceDigest != nil { + return nil, "", errors.Errorf(`Manifest lists are not supported by "ostree:"`) + } + if s.repo == nil { + repo, err := openRepo(s.ref.repo) + if err != nil { + return nil, "", err + } + s.repo = repo + } + + b := fmt.Sprintf("ociimage/%s", s.ref.branchName) + found, out, err := readMetadata(s.repo, b, "docker.manifest") + if err != nil { + return nil, "", err + } + if !found { + return nil, "", errors.New("manifest not found") + } + m := []byte(out) + return m, manifest.GuessMIMEType(m), nil +} + +func (s *ostreeImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) { + return nil, "", errors.New("manifest lists are not supported by this transport") +} + +func openRepo(path string) (*C.struct_OstreeRepo, error) { + var cerr *C.GError + cpath := C.CString(path) + defer C.free(unsafe.Pointer(cpath)) + pathc := C.g_file_new_for_path(cpath) + defer C.g_object_unref(C.gpointer(pathc)) + repo := C.ostree_repo_new(pathc) + r := glib.GoBool(glib.GBoolean(C.ostree_repo_open(repo, nil, &cerr))) + if !r { + C.g_object_unref(C.gpointer(repo)) + return nil, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr))) + } + return repo, nil +} + +type ostreePathFileGetter struct { + repo *C.struct_OstreeRepo + parentRoot *C.GFile +} + +type ostreeReader struct { + stream *C.GFileInputStream +} + +func (o ostreeReader) Close() error { + C.g_object_unref(C.gpointer(o.stream)) + return nil +} +func (o ostreeReader) Read(p []byte) (int, error) { + var cerr *C.GError + instanceCast := C.g_type_check_instance_cast((*C.GTypeInstance)(unsafe.Pointer(o.stream)), C.g_input_stream_get_type()) + stream := (*C.GInputStream)(unsafe.Pointer(instanceCast)) + + b := C.g_input_stream_read_bytes(stream, (C.gsize)(cap(p)), nil, &cerr) + if b == nil { + return 0, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr))) + } + defer C.g_bytes_unref(b) + + count := int(C.g_bytes_get_size(b)) + if count == 0 { + return 0, io.EOF + } + data := (*[1 << 30]byte)(unsafe.Pointer(C.g_bytes_get_data(b, nil)))[:count:count] + copy(p, data) + return count, nil +} + +func readMetadata(repo *C.struct_OstreeRepo, commit, key string) (bool, string, error) { + var cerr *C.GError 
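+	// ref receives the commit checksum resolved below; allow_noent is passed as 1,
+	// so an unknown branch yields a nil ref rather than an error.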
+	var ref *C.char
+	defer func() {
+		// Free via a closure so that the value ref holds at return time is freed,
+		// not the nil value it holds when the defer statement executes.
+		if ref != nil {
+			C.free(unsafe.Pointer(ref))
+		}
+	}()
+
+	cCommit := C.CString(commit)
+	defer C.free(unsafe.Pointer(cCommit))
+
+	if !glib.GoBool(glib.GBoolean(C.ostree_repo_resolve_rev(repo, cCommit, C.gboolean(1), &ref, &cerr))) {
+		return false, "", glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr)))
+	}
+
+	if ref == nil {
+		return false, "", nil
+	}
+
+	var variant *C.GVariant
+	if !glib.GoBool(glib.GBoolean(C.ostree_repo_load_variant(repo, C.OSTREE_OBJECT_TYPE_COMMIT, ref, &variant, &cerr))) {
+		return false, "", glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr)))
+	}
+	defer C.g_variant_unref(variant)
+	if variant != nil {
+		cKey := C.CString(key)
+		defer C.free(unsafe.Pointer(cKey))
+
+		metadata := C.g_variant_get_child_value(variant, 0)
+		defer C.g_variant_unref(metadata)
+
+		data := C.g_variant_lookup_value(metadata, (*C.gchar)(cKey), nil)
+		if data != nil {
+			defer C.g_variant_unref(data)
+			ptr := (*C.char)(C.g_variant_get_string(data, nil))
+			val := C.GoString(ptr)
+			return true, val, nil
+		}
+	}
+	return false, "", nil
+}
+
+func newOSTreePathFileGetter(repo *C.struct_OstreeRepo, commit string) (*ostreePathFileGetter, error) {
+	var cerr *C.GError
+	var parentRoot *C.GFile
+	cCommit := C.CString(commit)
+	defer C.free(unsafe.Pointer(cCommit))
+	if !glib.GoBool(glib.GBoolean(C.ostree_repo_read_commit(repo, cCommit, &parentRoot, nil, nil, &cerr))) {
+		return &ostreePathFileGetter{}, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr)))
+	}
+
+	C.g_object_ref(C.gpointer(repo))
+
+	return &ostreePathFileGetter{repo: repo, parentRoot: parentRoot}, nil
+}
+
+func (o ostreePathFileGetter) Get(filename string) (io.ReadCloser, error) {
+	var file *C.GFile
+	if strings.HasPrefix(filename, "./") {
+		filename = filename[2:]
+	}
+	cfilename := C.CString(filename)
+	defer C.free(unsafe.Pointer(cfilename))
+
+	file = (*C.GFile)(C.g_file_resolve_relative_path(o.parentRoot, cfilename))
+
+	var cerr *C.GError
+	stream := C.g_file_read(file, nil, &cerr)
+	if stream == nil {
+		return nil, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr)))
+	}
+
+	return &ostreeReader{stream: stream}, nil
+}
+
+func (o ostreePathFileGetter) Close() {
+	C.g_object_unref(C.gpointer(o.repo))
+	C.g_object_unref(C.gpointer(o.parentRoot))
+}
+
+func (s *ostreeImageSource) readSingleFile(commit, path string) (io.ReadCloser, error) {
+	getter, err := newOSTreePathFileGetter(s.repo, commit)
+	if err != nil {
+		return nil, err
+	}
+	defer getter.Close()
+
+	return getter.Get(path)
+}
+
+// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
+func (s *ostreeImageSource) HasThreadSafeGetBlob() bool {
+	return false
+}
+
+// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (s *ostreeImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
+
+	blob := info.Digest.Hex()
+
+	// Ensure s.compressed is initialized. It is built by LayerInfosForCopy.
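+	// The repo stores layers under their original (compressed) digests, while
+	// LayerInfosForCopy reports uncompressed digests; s.compressed maps the latter
+	// back to the former so that the right branch is read below.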
+	if s.compressed == nil {
+		_, err := s.LayerInfosForCopy(ctx)
+		if err != nil {
+			return nil, -1, err
+		}
+	}
+	compressedBlob, found := s.compressed[info.Digest]
+	if found {
+		blob = compressedBlob.Hex()
+	}
+	branch := fmt.Sprintf("ociimage/%s", blob)
+
+	if s.repo == nil {
+		repo, err := openRepo(s.ref.repo)
+		if err != nil {
+			return nil, 0, err
+		}
+		s.repo = repo
+	}
+
+	layerSize, err := s.getLayerSize(blob)
+	if err != nil {
+		return nil, 0, err
+	}
+
+	tarsplit, err := s.getTarSplitData(blob)
+	if err != nil {
+		return nil, 0, err
+	}
+
+	// If tarsplit is nil, we are looking at the manifest; return the file in /content directly.
+	if tarsplit == nil {
+		file, err := s.readSingleFile(branch, "/content")
+		if err != nil {
+			return nil, 0, err
+		}
+		return file, layerSize, nil
+	}
+
+	mf := bytes.NewReader(tarsplit)
+	mfz, err := pgzip.NewReader(mf)
+	if err != nil {
+		return nil, 0, err
+	}
+	metaUnpacker := storage.NewJSONUnpacker(mfz)
+
+	getter, err := newOSTreePathFileGetter(s.repo, branch)
+	if err != nil {
+		mfz.Close()
+		return nil, 0, err
+	}
+
+	ots := asm.NewOutputTarStream(getter, metaUnpacker)
+
+	rc := ioutils.NewReadCloserWrapper(ots, func() error {
+		getter.Close()
+		mfz.Close()
+		return ots.Close()
+	})
+	return rc, layerSize, nil
+}
+
+func (s *ostreeImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
+	if instanceDigest != nil {
+		return nil, errors.New("manifest lists are not supported by this transport")
+	}
+	lenSignatures, err := s.getLenSignatures()
+	if err != nil {
+		return nil, err
+	}
+	branch := fmt.Sprintf("ociimage/%s", s.ref.branchName)
+
+	if s.repo == nil {
+		repo, err := openRepo(s.ref.repo)
+		if err != nil {
+			return nil, err
+		}
+		s.repo = repo
+	}
+
+	signatures := [][]byte{}
+	for i := int64(1); i <= lenSignatures; i++ {
+		sigReader, err := s.readSingleFile(branch, fmt.Sprintf("/signature-%d", i))
+		if err != nil {
+			return nil, err
+		}
+		defer sigReader.Close()
+
+		sig, err := ioutil.ReadAll(sigReader)
+		if err != nil {
+			return nil, err
+		}
+		signatures = append(signatures, sig)
+	}
+	return signatures, nil
+}
+
+// LayerInfosForCopy() returns the list of layer blobs that make up the root filesystem of
+// the image, after they've been decompressed.
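+// As a side effect it populates s.compressed, mapping each uncompressed digest back
+// to the original layer digest stored in the repo; GetBlob above relies on that mapping.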
+func (s *ostreeImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) {
+	updatedBlobInfos := []types.BlobInfo{}
+	manifestBlob, manifestType, err := s.GetManifest(ctx, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	man, err := manifest.FromBlob(manifestBlob, manifestType)
+	if err != nil {
+		return nil, err
+	}
+
+	s.compressed = make(map[digest.Digest]digest.Digest)
+
+	layerBlobs := man.LayerInfos()
+
+	for _, layerBlob := range layerBlobs {
+		branch := fmt.Sprintf("ociimage/%s", layerBlob.Digest.Hex())
+		found, uncompressedDigestStr, err := readMetadata(s.repo, branch, "docker.uncompressed_digest")
+		if err != nil || !found {
+			return nil, err
+		}
+
+		found, uncompressedSizeStr, err := readMetadata(s.repo, branch, "docker.uncompressed_size")
+		if err != nil || !found {
+			return nil, err
+		}
+
+		uncompressedSize, err := strconv.ParseInt(uncompressedSizeStr, 10, 64)
+		if err != nil {
+			return nil, err
+		}
+		uncompressedDigest := digest.Digest(uncompressedDigestStr)
+		blobInfo := types.BlobInfo{
+			Digest:    uncompressedDigest,
+			Size:      uncompressedSize,
+			MediaType: layerBlob.MediaType,
+		}
+		s.compressed[uncompressedDigest] = layerBlob.Digest
+		updatedBlobInfos = append(updatedBlobInfos, blobInfo)
+	}
+	return updatedBlobInfos, nil
+}
diff --git a/vendor/github.com/containers/image/ostree/ostree_transport.go b/vendor/github.com/containers/image/ostree/ostree_transport.go
new file mode 100644
index 0000000..c985653
--- /dev/null
+++ b/vendor/github.com/containers/image/ostree/ostree_transport.go
@@ -0,0 +1,252 @@
+// +build !containers_image_ostree_stub
+
+package ostree
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"os"
+	"path/filepath"
+	"regexp"
+	"strings"
+
+	"github.com/containers/image/directory/explicitfilepath"
+	"github.com/containers/image/docker/reference"
+	"github.com/containers/image/image"
+	"github.com/containers/image/transports"
+	"github.com/containers/image/types"
+	"github.com/pkg/errors"
+)
+
+const defaultOSTreeRepo = "/ostree/repo"
+
+// Transport is an ImageTransport for ostree paths.
+var Transport = ostreeTransport{}
+
+type ostreeTransport struct{}
+
+func (t ostreeTransport) Name() string {
+	return "ostree"
+}
+
+func init() {
+	transports.Register(Transport)
+}
+
+// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes key
+// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
+// It is acceptable to allow an invalid value which will never be matched; it can "only" cause user confusion.
+// scope passed to this function will not be "", that value is always allowed.
+func (t ostreeTransport) ValidatePolicyConfigurationScope(scope string) error {
+	sep := strings.Index(scope, ":")
+	if sep < 0 {
+		return errors.Errorf("Invalid ostree: scope %s: Must include a repo", scope)
+	}
+	repo := scope[:sep]
+
+	if !strings.HasPrefix(repo, "/") {
+		return errors.Errorf("Invalid ostree: scope %s: repository must be an absolute path", scope)
+	}
+	cleaned := filepath.Clean(repo)
+	if cleaned != repo {
+		return errors.Errorf(`Invalid ostree: scope %s: Uses non-canonical path format, perhaps try with path %s`, scope, cleaned)
+	}
+
+	// FIXME? In the namespaces within a repo,
+	// we could be verifying the various character set and length restrictions
+	// from docker/distribution/reference.regexp.go, but other than that there
+	// are few semantically invalid strings.
+	return nil
+}
+
+// ostreeReference is an ImageReference for ostree paths.
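+// The image name is encoded into branchName; the image's manifest and layers live
+// under the repo's "ociimage/" branch namespace, as read by ostreeImageSource above.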
+type ostreeReference struct {
+	image      string
+	branchName string
+	repo       string
+}
+
+type ostreeImageCloser struct {
+	types.ImageCloser
+	size int64
+}
+
+func (t ostreeTransport) ParseReference(ref string) (types.ImageReference, error) {
+	var image, repo string
+	s := strings.SplitN(ref, "@/", 2)
+	if len(s) == 1 {
+		image, repo = s[0], defaultOSTreeRepo
+	} else {
+		image, repo = s[0], "/"+s[1]
+	}
+
+	return NewReference(image, repo)
+}
+
+// NewReference returns an OSTree reference for a specified repo and image.
+func NewReference(image string, repo string) (types.ImageReference, error) {
+	// image is not _really_ in a containers/image/docker/reference format;
+	// as far as the libOSTree ociimage/* namespace is concerned, it is more or
+	// less an arbitrary string with an implied tag.
+	// Parse the image using reference.ParseNormalizedNamed so that we can
+	// check whether the image has a tag specified, and add ":latest" if needed.
+	ostreeImage, err := reference.ParseNormalizedNamed(image)
+	if err != nil {
+		return nil, err
+	}
+
+	if reference.IsNameOnly(ostreeImage) {
+		image = image + ":latest"
+	}
+
+	resolved, err := explicitfilepath.ResolvePathToFullyExplicit(repo)
+	if err != nil {
+		// With os.IsNotExist(err), the parent directory of repo does not exist either;
+		// that should ordinarily not happen, but it would be a bit weird to reject
+		// references which do not specify a repo just because the implicit defaultOSTreeRepo
+		// does not exist.
+		if os.IsNotExist(err) && repo == defaultOSTreeRepo {
+			resolved = repo
+		} else {
+			return nil, err
+		}
+	}
+	// This is necessary to prevent directory paths returned by PolicyConfigurationNamespaces
+	// from being ambiguous with values of PolicyConfigurationIdentity.
+	if strings.Contains(resolved, ":") {
+		return nil, errors.Errorf("Invalid OSTree reference %s@%s: path %s contains a colon", image, repo, resolved)
+	}
+
+	return ostreeReference{
+		image:      image,
+		branchName: encodeOStreeRef(image),
+		repo:       resolved,
+	}, nil
+}
+
+func (ref ostreeReference) Transport() types.ImageTransport {
+	return Transport
+}
+
+// StringWithinTransport returns a string representation of the reference, which MUST be such that
+// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
+// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
+// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
+// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
+func (ref ostreeReference) StringWithinTransport() string {
+	return fmt.Sprintf("%s@%s", ref.image, ref.repo)
+}
+
+// DockerReference returns a Docker reference associated with this reference
+// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent,
+// not e.g. after redirect or alias processing), or nil if unknown/not applicable.
+func (ref ostreeReference) DockerReference() reference.Named {
+	return nil
+}
+
+func (ref ostreeReference) PolicyConfigurationIdentity() string {
+	return fmt.Sprintf("%s:%s", ref.repo, ref.image)
+}
+
+// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search
+// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed
+// in order, terminating on first match, and an implicit "" is always checked at the end.
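+// For example (illustrative values), image "example.com/ns/foo:latest" with repo
+// "/ostree/repo" yields the namespaces
+//
+//	/ostree/repo:example.com/ns/foo
+//	/ostree/repo:example.com/ns
+//	/ostree/repo:example.com
+//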
+// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
+// and each following element to be a prefix of the element preceding it.
+func (ref ostreeReference) PolicyConfigurationNamespaces() []string {
+	s := strings.SplitN(ref.image, ":", 2)
+	if len(s) != 2 { // Coverage: Should never happen, NewReference above ensures ref.image has a :tag.
+		panic(fmt.Sprintf("Internal inconsistency: ref.image value %q does not have a :tag", ref.image))
+	}
+	name := s[0]
+	res := []string{}
+	for {
+		res = append(res, fmt.Sprintf("%s:%s", ref.repo, name))
+
+		lastSlash := strings.LastIndex(name, "/")
+		if lastSlash == -1 {
+			break
+		}
+		name = name[:lastSlash]
+	}
+	return res
+}
+
+func (s *ostreeImageCloser) Size() (int64, error) {
+	return s.size, nil
+}
+
+// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
+// The caller must call .Close() on the returned ImageCloser.
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
+func (ref ostreeReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
+	var tmpDir string
+	if sys == nil || sys.OSTreeTmpDirPath == "" {
+		tmpDir = os.TempDir()
+	} else {
+		tmpDir = sys.OSTreeTmpDirPath
+	}
+	src, err := newImageSource(tmpDir, ref)
+	if err != nil {
+		return nil, err
+	}
+	return image.FromSource(ctx, sys, src)
+}
+
+// NewImageSource returns a types.ImageSource for this reference.
+// The caller must call .Close() on the returned ImageSource.
+func (ref ostreeReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
+	var tmpDir string
+	if sys == nil || sys.OSTreeTmpDirPath == "" {
+		tmpDir = os.TempDir()
+	} else {
+		tmpDir = sys.OSTreeTmpDirPath
+	}
+	return newImageSource(tmpDir, ref)
+}
+
+// NewImageDestination returns a types.ImageDestination for this reference.
+// The caller must call .Close() on the returned ImageDestination.
+func (ref ostreeReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
+	var tmpDir string
+	if sys == nil || sys.OSTreeTmpDirPath == "" {
+		tmpDir = os.TempDir()
+	} else {
+		tmpDir = sys.OSTreeTmpDirPath
+	}
+	return newImageDestination(ref, tmpDir)
+}
+
+// DeleteImage deletes the named image from the registry, if supported.
+func (ref ostreeReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
+	return errors.Errorf("Deleting images not implemented for ostree: images")
+}
+
+var ostreeRefRegexp = regexp.MustCompile(`^[A-Za-z0-9.-]$`)
+
+func encodeOStreeRef(in string) string {
+	var buffer bytes.Buffer
+	for i := range in {
+		sub := in[i : i+1]
+		if ostreeRefRegexp.MatchString(sub) {
+			buffer.WriteString(sub)
+		} else {
+			buffer.WriteString(fmt.Sprintf("_%02X", sub[0]))
+		}
+	}
+	return buffer.String()
+}
+
+// manifestPath returns a path for the manifest within an ostree using our conventions.
+func (ref ostreeReference) manifestPath() string {
+	return filepath.Join("manifest", "manifest.json")
+}
+
+// signaturePath returns a path for a signature within an ostree using our conventions.
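+// Signatures are numbered starting at 1 on disk, matching the /signature-%d files read by GetSignatures.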
+func (ref ostreeReference) signaturePath(index int) string {
+	return filepath.Join("manifest", fmt.Sprintf("signature-%d", index+1))
+}
diff --git a/vendor/github.com/containers/image/pkg/blobinfocache/boltdb/boltdb.go b/vendor/github.com/containers/image/pkg/blobinfocache/boltdb/boltdb.go
new file mode 100644
index 0000000..19d0a6c
--- /dev/null
+++ b/vendor/github.com/containers/image/pkg/blobinfocache/boltdb/boltdb.go
@@ -0,0 +1,332 @@
+// Package boltdb implements a BlobInfoCache backed by BoltDB.
+package boltdb
+
+import (
+	"fmt"
+	"os"
+	"sync"
+	"time"
+
+	"github.com/containers/image/pkg/blobinfocache/internal/prioritize"
+	"github.com/containers/image/types"
+	bolt "github.com/etcd-io/bbolt"
+	"github.com/opencontainers/go-digest"
+	"github.com/sirupsen/logrus"
+)
+
+var (
+	// NOTE: There is no versioning data inside the file; this is a “cache”, so on an incompatible format upgrade
+	// we can simply start over with a different filename; update blobInfoCacheFilename.
+
+	// FIXME: For CRI-O, does this need to hide information between different users?
+
+	// uncompressedDigestBucket stores a mapping from any digest to an uncompressed digest.
+	uncompressedDigestBucket = []byte("uncompressedDigest")
+	// digestByUncompressedBucket stores a bucket per uncompressed digest, with the bucket containing a set of digests for that uncompressed digest
+	// (as a set of key=digest, value="" pairs)
+	digestByUncompressedBucket = []byte("digestByUncompressed")
+	// knownLocationsBucket stores a nested structure of buckets, keyed by (transport name, scope string, blob digest), ultimately containing
+	// a bucket of (opaque location reference, BinaryMarshaller-encoded time.Time value).
+	knownLocationsBucket = []byte("knownLocations")
+)
+
+// Concurrency:
+// See https://www.sqlite.org/src/artifact/c230a7a24?ln=994-1081 for all the issues with locks, which make it extremely
+// difficult to use a single BoltDB file from multiple threads/goroutines inside a process. So, we punt and only allow one at a time.
+
+// pathLock contains a lock for a specific BoltDB database path.
+type pathLock struct {
+	refCount int64      // Number of threads/goroutines owning or waiting on this lock. Protected by global pathLocksMutex, NOT by the mutex field below!
+	mutex    sync.Mutex // Owned by the thread/goroutine allowed to access the BoltDB database.
+}
+
+var (
+	// pathLocks contains a lock for each currently open file.
+	// This must be global so that independently created instances of boltDBCache exclude each other.
+	// The map is protected by pathLocksMutex.
+	// FIXME? Should this be based on device:inode numbers instead of paths?
+	pathLocks      = map[string]*pathLock{}
+	pathLocksMutex = sync.Mutex{}
+)
+
+// lockPath obtains the pathLock for path.
+// The caller must call unlockPath eventually.
+func lockPath(path string) {
+	pl := func() *pathLock { // A scope for defer
+		pathLocksMutex.Lock()
+		defer pathLocksMutex.Unlock()
+		pl, ok := pathLocks[path]
+		if ok {
+			pl.refCount++
+		} else {
+			pl = &pathLock{refCount: 1, mutex: sync.Mutex{}}
+			pathLocks[path] = pl
+		}
+		return pl
+	}()
+	pl.mutex.Lock()
+}
+
+// unlockPath releases the pathLock for path.
+func unlockPath(path string) {
+	pathLocksMutex.Lock()
+	defer pathLocksMutex.Unlock()
+	pl, ok := pathLocks[path]
+	if !ok {
+		// Should this return an error instead? BlobInfoCache ultimately ignores errors…
+		panic(fmt.Sprintf("Internal error: unlocking nonexistent lock for path %s", path))
+	}
+	pl.mutex.Unlock()
+	pl.refCount--
+	if pl.refCount == 0 {
+		delete(pathLocks, path)
+	}
+}
+
+// cache is a BlobInfoCache implementation which uses a BoltDB file at the specified path.
+//
+// Note that we don’t keep the database open across operations, because that would lock the file and block any other
+// users; instead, we need to open/close it for every single write or lookup.
+type cache struct {
+	path string
+}
+
+// New returns a BlobInfoCache implementation which uses a BoltDB file at path.
+//
+// Most users should call blobinfocache.DefaultCache instead.
+func New(path string) types.BlobInfoCache {
+	return &cache{path: path}
+}
+
+// view runs the specified fn within a read-only transaction on the database.
+func (bdc *cache) view(fn func(tx *bolt.Tx) error) (retErr error) {
+	// bolt.Open(bdc.path, 0600, &bolt.Options{ReadOnly: true}) will, if the file does not exist,
+	// nevertheless create it, but with an O_RDONLY file descriptor, try to initialize it, and fail — while holding
+	// a read lock, blocking any future writes.
+	// Hence this preliminary check, which is RACY: Another process could remove the file
+	// between the Lstat call and opening the database.
+	if _, err := os.Lstat(bdc.path); err != nil && os.IsNotExist(err) {
+		return err
+	}
+
+	lockPath(bdc.path)
+	defer unlockPath(bdc.path)
+	db, err := bolt.Open(bdc.path, 0600, &bolt.Options{ReadOnly: true})
+	if err != nil {
+		return err
+	}
+	defer func() {
+		if err := db.Close(); retErr == nil && err != nil {
+			retErr = err
+		}
+	}()
+
+	return db.View(fn)
+}
+
+// update runs the specified fn within a read-write transaction on the database.
+func (bdc *cache) update(fn func(tx *bolt.Tx) error) (retErr error) {
+	lockPath(bdc.path)
+	defer unlockPath(bdc.path)
+	db, err := bolt.Open(bdc.path, 0600, nil)
+	if err != nil {
+		return err
+	}
+	defer func() {
+		if err := db.Close(); retErr == nil && err != nil {
+			retErr = err
+		}
+	}()
+
+	return db.Update(fn)
+}
+
+// uncompressedDigest implements BlobInfoCache.UncompressedDigest within the provided read-only transaction.
+func (bdc *cache) uncompressedDigest(tx *bolt.Tx, anyDigest digest.Digest) digest.Digest {
+	if b := tx.Bucket(uncompressedDigestBucket); b != nil {
+		if uncompressedBytes := b.Get([]byte(anyDigest.String())); uncompressedBytes != nil {
+			d, err := digest.Parse(string(uncompressedBytes))
+			if err == nil {
+				return d
+			}
+			// FIXME? Log err (but throttle the log volume on repeated accesses)?
+		}
+	}
+	// Presence in digestByUncompressedBucket implies that anyDigest must already refer to an uncompressed digest.
+	// This way we don't have to waste storage space with trivial (uncompressed, uncompressed) mappings
+	// when we already record a (compressed, uncompressed) pair.
+	if b := tx.Bucket(digestByUncompressedBucket); b != nil {
+		if b = b.Bucket([]byte(anyDigest.String())); b != nil {
+			c := b.Cursor()
+			if k, _ := c.First(); k != nil { // The bucket is non-empty
+				return anyDigest
+			}
+		}
+	}
+	return ""
+}
+
+// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
+// May return anyDigest if it is known to be uncompressed.
+// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
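+// The lookup is read-only; in particular it never creates the BoltDB file when it does not exist yet.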
+func (bdc *cache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
+	var res digest.Digest
+	if err := bdc.view(func(tx *bolt.Tx) error {
+		res = bdc.uncompressedDigest(tx, anyDigest)
+		return nil
+	}); err != nil { // Including os.IsNotExist(err)
+		return "" // FIXME? Log err (but throttle the log volume on repeated accesses)?
+	}
+	return res
+}
+
+// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is the digest supplied in uncompressed.
+// It’s allowed for anyDigest == uncompressed.
+// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
+// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
+// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
+func (bdc *cache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
+	_ = bdc.update(func(tx *bolt.Tx) error {
+		b, err := tx.CreateBucketIfNotExists(uncompressedDigestBucket)
+		if err != nil {
+			return err
+		}
+		key := []byte(anyDigest.String())
+		if previousBytes := b.Get(key); previousBytes != nil {
+			previous, err := digest.Parse(string(previousBytes))
+			if err != nil {
+				return err
+			}
+			if previous != uncompressed {
+				logrus.Warnf("Uncompressed digest for blob %s previously recorded as %s, now %s", anyDigest, previous, uncompressed)
+			}
+		}
+		if err := b.Put(key, []byte(uncompressed.String())); err != nil {
+			return err
+		}
+
+		b, err = tx.CreateBucketIfNotExists(digestByUncompressedBucket)
+		if err != nil {
+			return err
+		}
+		b, err = b.CreateBucketIfNotExists([]byte(uncompressed.String()))
+		if err != nil {
+			return err
+		}
+		if err := b.Put([]byte(anyDigest.String()), []byte{}); err != nil { // Possibly writing the same []byte{} presence marker again.
+			return err
+		}
+		return nil
+	}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
+}
+
+// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
+// and can be reused given the opaque location data.
+func (bdc *cache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) {
+	_ = bdc.update(func(tx *bolt.Tx) error {
+		b, err := tx.CreateBucketIfNotExists(knownLocationsBucket)
+		if err != nil {
+			return err
+		}
+		b, err = b.CreateBucketIfNotExists([]byte(transport.Name()))
+		if err != nil {
+			return err
+		}
+		b, err = b.CreateBucketIfNotExists([]byte(scope.Opaque))
+		if err != nil {
+			return err
+		}
+		b, err = b.CreateBucketIfNotExists([]byte(blobDigest.String()))
+		if err != nil {
+			return err
+		}
+		value, err := time.Now().MarshalBinary()
+		if err != nil {
+			return err
+		}
+		if err := b.Put([]byte(location.Opaque), value); err != nil { // Possibly overwriting an older entry.
+			return err
+		}
+		return nil
+	}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
+}
+
+// appendReplacementCandidates creates prioritize.CandidateWithTime values for digest in scopeBucket, and returns the result of appending them to candidates.
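+// Each entry in the digest's bucket maps an opaque location to the BinaryMarshaller-encoded time it was last seen.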
+func (bdc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, scopeBucket *bolt.Bucket, digest digest.Digest) []prioritize.CandidateWithTime {
+	b := scopeBucket.Bucket([]byte(digest.String()))
+	if b == nil {
+		return candidates
+	}
+	_ = b.ForEach(func(k, v []byte) error {
+		t := time.Time{}
+		if err := t.UnmarshalBinary(v); err != nil {
+			return err
+		}
+		candidates = append(candidates, prioritize.CandidateWithTime{
+			Candidate: types.BICReplacementCandidate{
+				Digest:   digest,
+				Location: types.BICLocationReference{Opaque: string(k)},
+			},
+			LastSeen: t,
+		})
+		return nil
+	}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
+	return candidates
+}
+
+// CandidateLocations returns a prioritized, limited number of blobs and their locations that could possibly be reused
+// within the specified (transport, scope) (if they still exist, which is not guaranteed).
+//
+// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
+// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
+// uncompressed digest.
+func (bdc *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
+	res := []prioritize.CandidateWithTime{}
+	var uncompressedDigestValue digest.Digest // = ""
+	if err := bdc.view(func(tx *bolt.Tx) error {
+		scopeBucket := tx.Bucket(knownLocationsBucket)
+		if scopeBucket == nil {
+			return nil
+		}
+		scopeBucket = scopeBucket.Bucket([]byte(transport.Name()))
+		if scopeBucket == nil {
+			return nil
+		}
+		scopeBucket = scopeBucket.Bucket([]byte(scope.Opaque))
+		if scopeBucket == nil {
+			return nil
+		}
+
+		res = bdc.appendReplacementCandidates(res, scopeBucket, primaryDigest)
+		if canSubstitute {
+			if uncompressedDigestValue = bdc.uncompressedDigest(tx, primaryDigest); uncompressedDigestValue != "" {
+				b := tx.Bucket(digestByUncompressedBucket)
+				if b != nil {
+					b = b.Bucket([]byte(uncompressedDigestValue.String()))
+					if b != nil {
+						if err := b.ForEach(func(k, _ []byte) error {
+							d, err := digest.Parse(string(k))
+							if err != nil {
+								return err
+							}
+							if d != primaryDigest && d != uncompressedDigestValue {
+								res = bdc.appendReplacementCandidates(res, scopeBucket, d)
+							}
+							return nil
+						}); err != nil {
+							return err
+						}
+					}
+				}
+				if uncompressedDigestValue != primaryDigest {
+					res = bdc.appendReplacementCandidates(res, scopeBucket, uncompressedDigestValue)
+				}
+			}
+		}
+		return nil
+	}); err != nil { // Including os.IsNotExist(err)
+		return []types.BICReplacementCandidate{} // FIXME? Log err (but throttle the log volume on repeated accesses)?
+	}
+
+	return prioritize.DestructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigestValue)
+}
diff --git a/vendor/github.com/containers/image/pkg/blobinfocache/default.go b/vendor/github.com/containers/image/pkg/blobinfocache/default.go
new file mode 100644
index 0000000..3573332
--- /dev/null
+++ b/vendor/github.com/containers/image/pkg/blobinfocache/default.go
@@ -0,0 +1,75 @@
+package blobinfocache
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"strconv"
+
+	"github.com/containers/image/pkg/blobinfocache/boltdb"
+	"github.com/containers/image/pkg/blobinfocache/memory"
+	"github.com/containers/image/types"
+	"github.com/sirupsen/logrus"
+)
+
+const (
+	// blobInfoCacheFilename is the file name used for blob info caches.
+	// If the format changes in an incompatible way, increase the version number.
+	blobInfoCacheFilename = "blob-info-cache-v1.boltdb"
+	// systemBlobInfoCacheDir is the directory containing the blob info cache (in blobInfoCacheFilename) for root-running processes.
+	systemBlobInfoCacheDir = "/var/lib/containers/cache"
+)
+
+// blobInfoCacheDir returns a path to a blob info cache appropriate for sys and euid.
+// euid is used so that (sudo …) does not write root-owned files into the unprivileged user's home directory.
+func blobInfoCacheDir(sys *types.SystemContext, euid int) (string, error) {
+	if sys != nil && sys.BlobInfoCacheDir != "" {
+		return sys.BlobInfoCacheDir, nil
+	}
+
+	// FIXME? On Windows, os.Geteuid() returns -1. What should we do? Right now we treat it as unprivileged
+	// and fail (fall back to memory-only) if neither HOME nor XDG_DATA_HOME is set, which is, at least, safe.
+	if euid == 0 {
+		if sys != nil && sys.RootForImplicitAbsolutePaths != "" {
+			return filepath.Join(sys.RootForImplicitAbsolutePaths, systemBlobInfoCacheDir), nil
+		}
+		return systemBlobInfoCacheDir, nil
+	}
+
+	// This is intended to mirror the GraphRoot determination in github.com/containers/libpod/pkg/util.GetRootlessStorageOpts.
+	dataDir := os.Getenv("XDG_DATA_HOME")
+	if dataDir == "" {
+		home := os.Getenv("HOME")
+		if home == "" {
+			return "", fmt.Errorf("neither XDG_DATA_HOME nor HOME was set non-empty")
+		}
+		dataDir = filepath.Join(home, ".local", "share")
+	}
+	return filepath.Join(dataDir, "containers", "cache"), nil
+}
+
+func getRootlessUID() int {
+	uidEnv := os.Getenv("_CONTAINERS_ROOTLESS_UID")
+	if uidEnv != "" {
+		u, _ := strconv.Atoi(uidEnv)
+		return u
+	}
+	return os.Geteuid()
+}
+
+// DefaultCache returns the default BlobInfoCache implementation appropriate for sys.
+func DefaultCache(sys *types.SystemContext) types.BlobInfoCache {
+	dir, err := blobInfoCacheDir(sys, getRootlessUID())
+	if err != nil {
+		logrus.Debugf("Error determining a location for %s, using a memory-only cache", blobInfoCacheFilename)
+		return memory.New()
+	}
+	path := filepath.Join(dir, blobInfoCacheFilename)
+	if err := os.MkdirAll(dir, 0700); err != nil {
+		logrus.Debugf("Error creating parent directories for %s, using a memory-only cache: %v", blobInfoCacheFilename, err)
+		return memory.New()
+	}
+
+	logrus.Debugf("Using blob info cache at %s", path)
+	return boltdb.New(path)
+}
diff --git a/vendor/github.com/containers/image/pkg/blobinfocache/internal/prioritize/prioritize.go b/vendor/github.com/containers/image/pkg/blobinfocache/internal/prioritize/prioritize.go
new file mode 100644
index 0000000..5479319
--- /dev/null
+++ b/vendor/github.com/containers/image/pkg/blobinfocache/internal/prioritize/prioritize.go
@@ -0,0 +1,110 @@
+// Package prioritize provides utilities for prioritizing locations in
+// types.BlobInfoCache.CandidateLocations.
+package prioritize
+
+import (
+	"sort"
+	"time"
+
+	"github.com/containers/image/types"
+	"github.com/opencontainers/go-digest"
+)
+
+// replacementAttempts is the number of blob replacement candidates returned by destructivelyPrioritizeReplacementCandidates,
+// and therefore ultimately by types.BlobInfoCache.CandidateLocations.
+// This is a heuristic/guess, and could well use a different value.
+const replacementAttempts = 5
+
+// CandidateWithTime is the input to types.BICReplacementCandidate prioritization.
+type CandidateWithTime struct { + Candidate types.BICReplacementCandidate // The replacement candidate + LastSeen time.Time // Time the candidate was last known to exist (either read or written) +} + +// candidateSortState is a local state implementing sort.Interface on candidates to prioritize, +// along with the specially-treated digest values for the implementation of sort.Interface.Less +type candidateSortState struct { + cs []CandidateWithTime // The entries to sort + primaryDigest digest.Digest // The digest the user actually asked for + uncompressedDigest digest.Digest // The uncompressed digest corresponding to primaryDigest. May be "", or even equal to primaryDigest +} + +func (css *candidateSortState) Len() int { + return len(css.cs) +} + +func (css *candidateSortState) Less(i, j int) bool { + xi := css.cs[i] + xj := css.cs[j] + + // primaryDigest entries come first, more recent first. + // uncompressedDigest entries, if uncompressedDigest is set and != primaryDigest, come last, more recent entry first. + // Other digest values are primarily sorted by time (more recent first), secondarily by digest (to provide a deterministic order) + + // First, deal with the primaryDigest/uncompressedDigest cases: + if xi.Candidate.Digest != xj.Candidate.Digest { + // - The two digests are different, and one (or both) of the digests is primaryDigest or uncompressedDigest: time does not matter + if xi.Candidate.Digest == css.primaryDigest { + return true + } + if xj.Candidate.Digest == css.primaryDigest { + return false + } + if css.uncompressedDigest != "" { + if xi.Candidate.Digest == css.uncompressedDigest { + return false + } + if xj.Candidate.Digest == css.uncompressedDigest { + return true + } + } + } else { // xi.Candidate.Digest == xj.Candidate.Digest + // The two digests are the same, and are either primaryDigest or uncompressedDigest: order by time + if xi.Candidate.Digest == css.primaryDigest || (css.uncompressedDigest != "" && xi.Candidate.Digest == css.uncompressedDigest) { + return xi.LastSeen.After(xj.LastSeen) + } + } + + // Neither of the digests are primaryDigest/uncompressedDigest: + if !xi.LastSeen.Equal(xj.LastSeen) { // Order primarily by time + return xi.LastSeen.After(xj.LastSeen) + } + // Fall back to digest, if timestamps end up _exactly_ the same (how?!) + return xi.Candidate.Digest < xj.Candidate.Digest +} + +func (css *candidateSortState) Swap(i, j int) { + css.cs[i], css.cs[j] = css.cs[j], css.cs[i] +} + +// destructivelyPrioritizeReplacementCandidatesWithMax is destructivelyPrioritizeReplacementCandidates with a parameter for the +// number of entries to limit, only to make testing simpler. +func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest, maxCandidates int) []types.BICReplacementCandidate { + // We don't need to use sort.Stable() because nanosecond timestamps are (presumably?) unique, so no two elements should + // compare equal. 
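+	// Resulting order: primaryDigest entries first, then other digests by recency,
+	// then (if distinct) uncompressedDigest entries, as implemented by Less above.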
+	sort.Sort(&candidateSortState{
+		cs:                 cs,
+		primaryDigest:      primaryDigest,
+		uncompressedDigest: uncompressedDigest,
+	})
+
+	resLength := len(cs)
+	if resLength > maxCandidates {
+		resLength = maxCandidates
+	}
+	res := make([]types.BICReplacementCandidate, resLength)
+	for i := range res {
+		res[i] = cs[i].Candidate
+	}
+	return res
+}
+
+// DestructivelyPrioritizeReplacementCandidates consumes AND DESTROYS an array of possible replacement candidates with their last known existence times,
+// the primary digest the user actually asked for, and the corresponding uncompressed digest (if known, possibly equal to the primary digest),
+// and returns an appropriately prioritized and/or trimmed result suitable for a return value from types.BlobInfoCache.CandidateLocations.
+//
+// WARNING: The array of candidates is destructively modified. (The implementation of this function could of course
+// make a copy, but all CandidateLocations implementations build the slice of candidates only for the single purpose of calling this function anyway.)
+func DestructivelyPrioritizeReplacementCandidates(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest) []types.BICReplacementCandidate {
+	return destructivelyPrioritizeReplacementCandidatesWithMax(cs, primaryDigest, uncompressedDigest, replacementAttempts)
+}
diff --git a/vendor/github.com/containers/image/pkg/blobinfocache/memory/memory.go b/vendor/github.com/containers/image/pkg/blobinfocache/memory/memory.go
new file mode 100644
index 0000000..dfb3386
--- /dev/null
+++ b/vendor/github.com/containers/image/pkg/blobinfocache/memory/memory.go
@@ -0,0 +1,145 @@
+// Package memory implements an in-memory BlobInfoCache.
+package memory
+
+import (
+	"sync"
+	"time"
+
+	"github.com/containers/image/pkg/blobinfocache/internal/prioritize"
+	"github.com/containers/image/types"
+	digest "github.com/opencontainers/go-digest"
+	"github.com/sirupsen/logrus"
+)
+
+// locationKey only exists to make lookup in knownLocations easier.
+type locationKey struct {
+	transport  string
+	scope      types.BICTransportScope
+	blobDigest digest.Digest
+}
+
+// cache implements an in-memory-only BlobInfoCache
+type cache struct {
+	mutex sync.Mutex
+	// The following fields can only be accessed with mutex held.
+	uncompressedDigests   map[digest.Digest]digest.Digest
+	digestsByUncompressed map[digest.Digest]map[digest.Digest]struct{}             // stores a set of digests for each uncompressed digest
+	knownLocations        map[locationKey]map[types.BICLocationReference]time.Time // stores last known existence time for each location reference
+}
+
+// New returns a BlobInfoCache implementation which is in-memory only.
+//
+// This is primarily intended for tests, but also used as a fallback
+// if blobinfocache.DefaultCache can’t determine, or set up, the
+// location for a persistent cache. Most users should use
+// blobinfocache.DefaultCache instead of calling this directly.
+// Manual users of types.{ImageSource,ImageDestination} might also use
+// this instead of a persistent cache.
+func New() types.BlobInfoCache {
+	return &cache{
+		uncompressedDigests:   map[digest.Digest]digest.Digest{},
+		digestsByUncompressed: map[digest.Digest]map[digest.Digest]struct{}{},
+		knownLocations:        map[locationKey]map[types.BICLocationReference]time.Time{},
+	}
+}
+
+// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
+// May return anyDigest if it is known to be uncompressed.
+// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
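+// For example (illustrative digests), after RecordDigestUncompressedPair(c, u),
+// UncompressedDigest(c) returns u, and UncompressedDigest(u) also returns u.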
+func (mem *cache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
+	mem.mutex.Lock()
+	defer mem.mutex.Unlock()
+	return mem.uncompressedDigestLocked(anyDigest)
+}
+
+// uncompressedDigestLocked implements types.BlobInfoCache.UncompressedDigest, but must be called only with mem.mutex held.
+func (mem *cache) uncompressedDigestLocked(anyDigest digest.Digest) digest.Digest {
+	if d, ok := mem.uncompressedDigests[anyDigest]; ok {
+		return d
+	}
+	// Presence in digestsByUncompressed implies that anyDigest must already refer to an uncompressed digest.
+	// This way we don't have to waste storage space with trivial (uncompressed, uncompressed) mappings
+	// when we already record a (compressed, uncompressed) pair.
+	if m, ok := mem.digestsByUncompressed[anyDigest]; ok && len(m) > 0 {
+		return anyDigest
+	}
+	return ""
+}
+
+// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is the digest supplied in uncompressed.
+// It’s allowed for anyDigest == uncompressed.
+// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
+// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
+// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
+func (mem *cache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
+	mem.mutex.Lock()
+	defer mem.mutex.Unlock()
+	if previous, ok := mem.uncompressedDigests[anyDigest]; ok && previous != uncompressed {
+		logrus.Warnf("Uncompressed digest for blob %s previously recorded as %s, now %s", anyDigest, previous, uncompressed)
+	}
+	mem.uncompressedDigests[anyDigest] = uncompressed
+
+	anyDigestSet, ok := mem.digestsByUncompressed[uncompressed]
+	if !ok {
+		anyDigestSet = map[digest.Digest]struct{}{}
+		mem.digestsByUncompressed[uncompressed] = anyDigestSet
+	}
+	anyDigestSet[anyDigest] = struct{}{} // Possibly writing the same struct{}{} presence marker again.
+}
+
+// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
+// and can be reused given the opaque location data.
+func (mem *cache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) {
+	mem.mutex.Lock()
+	defer mem.mutex.Unlock()
+	key := locationKey{transport: transport.Name(), scope: scope, blobDigest: blobDigest}
+	locationScope, ok := mem.knownLocations[key]
+	if !ok {
+		locationScope = map[types.BICLocationReference]time.Time{}
+		mem.knownLocations[key] = locationScope
+	}
+	locationScope[location] = time.Now() // Possibly overwriting an older entry.
+}
+
+// appendReplacementCandidates creates prioritize.CandidateWithTime values for (transport, scope, digest), and returns the result of appending them to candidates.
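+// Each candidate carries the last-seen time recorded by RecordKnownLocation for its location.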
+func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest) []prioritize.CandidateWithTime {
+	locations := mem.knownLocations[locationKey{transport: transport.Name(), scope: scope, blobDigest: digest}] // nil if not present
+	for l, t := range locations {
+		candidates = append(candidates, prioritize.CandidateWithTime{
+			Candidate: types.BICReplacementCandidate{
+				Digest:   digest,
+				Location: l,
+			},
+			LastSeen: t,
+		})
+	}
+	return candidates
+}
+
+// CandidateLocations returns a prioritized, limited number of blobs and their locations that could possibly be reused
+// within the specified (transport, scope) (if they still exist, which is not guaranteed).
+//
+// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
+// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
+// uncompressed digest.
+func (mem *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
+	mem.mutex.Lock()
+	defer mem.mutex.Unlock()
+	res := []prioritize.CandidateWithTime{}
+	res = mem.appendReplacementCandidates(res, transport, scope, primaryDigest)
+	var uncompressedDigest digest.Digest // = ""
+	if canSubstitute {
+		if uncompressedDigest = mem.uncompressedDigestLocked(primaryDigest); uncompressedDigest != "" {
+			otherDigests := mem.digestsByUncompressed[uncompressedDigest] // nil if not present in the map
+			for d := range otherDigests {
+				if d != primaryDigest && d != uncompressedDigest {
+					res = mem.appendReplacementCandidates(res, transport, scope, d)
+				}
+			}
+			if uncompressedDigest != primaryDigest {
+				res = mem.appendReplacementCandidates(res, transport, scope, uncompressedDigest)
+			}
+		}
+	}
+	return prioritize.DestructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigest)
+}
diff --git a/vendor/github.com/containers/image/pkg/blobinfocache/none/none.go b/vendor/github.com/containers/image/pkg/blobinfocache/none/none.go
new file mode 100644
index 0000000..e5dca25
--- /dev/null
+++ b/vendor/github.com/containers/image/pkg/blobinfocache/none/none.go
@@ -0,0 +1,49 @@
+// Package none implements a dummy BlobInfoCache which records no data.
+package none
+
+import (
+	"github.com/containers/image/types"
+	"github.com/opencontainers/go-digest"
+)
+
+// noCache implements a dummy BlobInfoCache which records no data.
+type noCache struct {
+}
+
+// NoCache implements BlobInfoCache by not recording any data.
+//
+// This exists primarily for implementations of configGetter for
+// Manifest.Inspect, because configs only have one representation.
+// Any use of BlobInfoCache with blobs should usually use at least a
+// short-lived cache, ideally blobinfocache.DefaultCache.
+var NoCache types.BlobInfoCache = noCache{}
+
+// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
+// May return anyDigest if it is known to be uncompressed.
+// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
+func (noCache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
+	return ""
+}
+
+// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is the digest supplied in uncompressed.
+// It’s allowed for anyDigest == uncompressed.
+// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
+// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
+// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
+func (noCache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
+}
+
+// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
+// and can be reused given the opaque location data.
+func (noCache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) {
+}
+
+// CandidateLocations returns a prioritized, limited number of blobs and their locations that could possibly be reused
+// within the specified (transport, scope) (if they still exist, which is not guaranteed).
+//
+// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
+// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
+// uncompressed digest.
+func (noCache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
+	return nil
+}
diff --git a/vendor/github.com/containers/image/pkg/compression/compression.go b/vendor/github.com/containers/image/pkg/compression/compression.go
new file mode 100644
index 0000000..aad2bfc
--- /dev/null
+++ b/vendor/github.com/containers/image/pkg/compression/compression.go
@@ -0,0 +1,94 @@
+package compression
+
+import (
+	"bytes"
+	"compress/bzip2"
+	"io"
+	"io/ioutil"
+
+	"github.com/klauspost/pgzip"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+	"github.com/ulikunitz/xz"
+)
+
+// DecompressorFunc returns the decompressed stream, given a compressed stream.
+// The caller must call Close() on the decompressed stream (even if the compressed input stream does not need closing!).
+type DecompressorFunc func(io.Reader) (io.ReadCloser, error)
+
+// GzipDecompressor is a DecompressorFunc for the gzip compression algorithm.
+func GzipDecompressor(r io.Reader) (io.ReadCloser, error) {
+	return pgzip.NewReader(r)
+}
+
+// Bzip2Decompressor is a DecompressorFunc for the bzip2 compression algorithm.
+func Bzip2Decompressor(r io.Reader) (io.ReadCloser, error) {
+	return ioutil.NopCloser(bzip2.NewReader(r)), nil
+}
+
+// XzDecompressor is a DecompressorFunc for the xz compression algorithm.
+func XzDecompressor(r io.Reader) (io.ReadCloser, error) {
+	r, err := xz.NewReader(r)
+	if err != nil {
+		return nil, err
+	}
+	return ioutil.NopCloser(r), nil
+}
+
+// compressionAlgos is an internal implementation detail of DetectCompression
+var compressionAlgos = map[string]struct {
+	prefix       []byte
+	decompressor DecompressorFunc
+}{
+	"gzip":  {[]byte{0x1F, 0x8B, 0x08}, GzipDecompressor},                 // gzip (RFC 1952)
+	"bzip2": {[]byte{0x42, 0x5A, 0x68}, Bzip2Decompressor},                // bzip2 (decompress.c:BZ2_decompress)
+	"xz":    {[]byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, XzDecompressor}, // xz (/usr/share/doc/xz/xz-file-format.txt)
+}
+
+// DetectCompression returns a DecompressorFunc if the input is recognized as a compressed format, nil otherwise.
+// Because it consumes the start of input, other consumers must use the returned io.Reader instead to also read from the beginning.
+func DetectCompression(input io.Reader) (DecompressorFunc, io.Reader, error) {
+	buffer := [8]byte{}
+
+	n, err := io.ReadAtLeast(input, buffer[:], len(buffer))
+	if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
+		// This is a “real” error. We could just ignore it this time, process the data we have, and hope that the source will report the same error again.
+		// Instead, fail immediately with the original error cause instead of a possibly secondary/misleading error returned later.
+		return nil, nil, err
+	}
+
+	var decompressor DecompressorFunc
+	for name, algo := range compressionAlgos {
+		if bytes.HasPrefix(buffer[:n], algo.prefix) {
+			logrus.Debugf("Detected compression format %s", name)
+			decompressor = algo.decompressor
+			break
+		}
+	}
+	if decompressor == nil {
+		logrus.Debugf("No compression detected")
+	}
+
+	return decompressor, io.MultiReader(bytes.NewReader(buffer[:n]), input), nil
+}
+
+// AutoDecompress takes a stream and returns an uncompressed version of the
+// same stream.
+// The caller must call Close() on the returned stream (even if the input does not need,
+// or does not even support, closing!).
+func AutoDecompress(stream io.Reader) (io.ReadCloser, bool, error) {
+	decompressor, stream, err := DetectCompression(stream)
+	if err != nil {
+		return nil, false, errors.Wrapf(err, "Error detecting compression")
+	}
+	var res io.ReadCloser
+	if decompressor != nil {
+		res, err = decompressor(stream)
+		if err != nil {
+			return nil, false, errors.Wrapf(err, "Error initializing decompression")
+		}
+	} else {
+		res = ioutil.NopCloser(stream)
+	}
+	return res, decompressor != nil, nil
+}
diff --git a/vendor/github.com/containers/image/pkg/docker/config/config.go b/vendor/github.com/containers/image/pkg/docker/config/config.go
new file mode 100644
index 0000000..57b548e
--- /dev/null
+++ b/vendor/github.com/containers/image/pkg/docker/config/config.go
@@ -0,0 +1,306 @@
+package config
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/containers/image/types"
+	helperclient "github.com/docker/docker-credential-helpers/client"
+	"github.com/docker/docker-credential-helpers/credentials"
+	"github.com/docker/docker/pkg/homedir"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+type dockerAuthConfig struct {
+	Auth string `json:"auth,omitempty"`
+}
+
+type dockerConfigFile struct {
+	AuthConfigs map[string]dockerAuthConfig `json:"auths"`
+	CredHelpers map[string]string           `json:"credHelpers,omitempty"`
+}
+
+var (
+	defaultPerUIDPathFormat = filepath.FromSlash("/run/containers/%d/auth.json")
+	xdgRuntimeDirPath       = filepath.FromSlash("containers/auth.json")
+	dockerHomePath          = filepath.FromSlash(".docker/config.json")
+	dockerLegacyHomePath    = ".dockercfg"
+
+	// ErrNotLoggedIn is returned for users not logged into a registry
+	// that they are trying to log out of
+	ErrNotLoggedIn = errors.New("not logged in")
+)
+
+// SetAuthentication stores the username and password in the auth.json file
+func SetAuthentication(sys *types.SystemContext, registry, username, password string) error {
+	return modifyJSON(sys, func(auths *dockerConfigFile) (bool, error) {
+		if ch, exists := auths.CredHelpers[registry]; exists {
+			return false, setAuthToCredHelper(ch, registry, username, password)
+		}
+
+		creds := base64.StdEncoding.EncodeToString([]byte(username + ":" + password))
+		newCreds := dockerAuthConfig{Auth: creds}
+		auths.AuthConfigs[registry] = newCreds
+		return true, nil
+	})
+}
+
+// GetAuthentication returns the registry credentials stored in
+// either the auth.json file or .docker/config.json.
+// If an entry is not found, empty strings are returned for the username and password.
+func GetAuthentication(sys *types.SystemContext, registry string) (string, string, error) {
+	if sys != nil && sys.DockerAuthConfig != nil {
+		return sys.DockerAuthConfig.Username, sys.DockerAuthConfig.Password, nil
+	}
+
+	dockerLegacyPath := filepath.Join(homedir.Get(), dockerLegacyHomePath)
+	var paths []string
+	pathToAuth, err := getPathToAuth(sys)
+	if err == nil {
+		paths = append(paths, pathToAuth)
+	} else {
+		// Error means that the path set for XDG_RUNTIME_DIR does not exist,
+		// but we don't want to completely fail in the case that the user is pulling a public image.
+		// Log the error as a warning instead and move on to pulling the image.
+		logrus.Warnf("%v: Trying to pull image in the event that it is a public image.", err)
+	}
+	paths = append(paths, filepath.Join(homedir.Get(), dockerHomePath), dockerLegacyPath)
+
+	for _, path := range paths {
+		legacyFormat := path == dockerLegacyPath
+		username, password, err := findAuthentication(registry, path, legacyFormat)
+		if err != nil {
+			return "", "", err
+		}
+		if username != "" && password != "" {
+			return username, password, nil
+		}
+	}
+	return "", "", nil
+}
+
+// RemoveAuthentication deletes the credentials stored in auth.json
+func RemoveAuthentication(sys *types.SystemContext, registry string) error {
+	return modifyJSON(sys, func(auths *dockerConfigFile) (bool, error) {
+		// First try cred helpers.
+		if ch, exists := auths.CredHelpers[registry]; exists {
+			return false, deleteAuthFromCredHelper(ch, registry)
+		}
+
+		if _, ok := auths.AuthConfigs[registry]; ok {
+			delete(auths.AuthConfigs, registry)
+		} else if _, ok := auths.AuthConfigs[normalizeRegistry(registry)]; ok {
+			delete(auths.AuthConfigs, normalizeRegistry(registry))
+		} else {
+			return false, ErrNotLoggedIn
+		}
+		return true, nil
+	})
+}
+
+// RemoveAllAuthentication deletes all the credentials stored in auth.json
+func RemoveAllAuthentication(sys *types.SystemContext) error {
+	return modifyJSON(sys, func(auths *dockerConfigFile) (bool, error) {
+		auths.CredHelpers = make(map[string]string)
+		auths.AuthConfigs = make(map[string]dockerAuthConfig)
+		return true, nil
+	})
+}
+
+// getPathToAuth returns the path of the auth.json file.
+// The path can be overridden by the user if the overwrite-path flag is set.
+// If the flag is not set and XDG_RUNTIME_DIR is set, the auth.json file is saved in XDG_RUNTIME_DIR/containers.
+// Otherwise, the auth.json file is stored in /run/containers/UID.
+func getPathToAuth(sys *types.SystemContext) (string, error) {
+	if sys != nil {
+		if sys.AuthFilePath != "" {
+			return sys.AuthFilePath, nil
+		}
+		if sys.RootForImplicitAbsolutePaths != "" {
+			return filepath.Join(sys.RootForImplicitAbsolutePaths, fmt.Sprintf(defaultPerUIDPathFormat, os.Getuid())), nil
+		}
+	}
+
+	runtimeDir := os.Getenv("XDG_RUNTIME_DIR")
+	if runtimeDir != "" {
+		// This function does not in general need to separately check that the returned path exists; that’s racy, and callers will fail accessing the file anyway.
+		// We are checking for os.IsNotExist here only to give the user better guidance what to do in this special case.
+		_, err := os.Stat(runtimeDir)
+		if os.IsNotExist(err) {
+			// This means the user set the XDG_RUNTIME_DIR variable and either forgot to create the directory
+			// or made a typo while setting the environment variable,
+			// so return an error referring to $XDG_RUNTIME_DIR instead of xdgRuntimeDirPath inside.
+			return "", errors.Wrapf(err, "%q directory set by $XDG_RUNTIME_DIR does not exist. Either create the directory or unset $XDG_RUNTIME_DIR.", runtimeDir)
+		} // else ignore err and let the caller fail accessing xdgRuntimeDirPath.
+		return filepath.Join(runtimeDir, xdgRuntimeDirPath), nil
+	}
+	return fmt.Sprintf(defaultPerUIDPathFormat, os.Getuid()), nil
+}
+
+// readJSONFile unmarshals the authentications stored in the auth.json file and returns them,
+// or returns an empty dockerConfigFile data structure if auth.json does not exist.
+// If the file exists but is empty, readJSONFile returns an error.
+func readJSONFile(path string, legacyFormat bool) (dockerConfigFile, error) {
+	var auths dockerConfigFile
+
+	raw, err := ioutil.ReadFile(path)
+	if err != nil {
+		if os.IsNotExist(err) {
+			auths.AuthConfigs = map[string]dockerAuthConfig{}
+			return auths, nil
+		}
+		return dockerConfigFile{}, err
+	}
+
+	if legacyFormat {
+		if err = json.Unmarshal(raw, &auths.AuthConfigs); err != nil {
+			return dockerConfigFile{}, errors.Wrapf(err, "error unmarshaling JSON at %q", path)
+		}
+		return auths, nil
+	}
+
+	if err = json.Unmarshal(raw, &auths); err != nil {
+		return dockerConfigFile{}, errors.Wrapf(err, "error unmarshaling JSON at %q", path)
+	}
+
+	return auths, nil
+}
+
+// modifyJSON writes to auth.json if the dockerConfigFile has been updated
+func modifyJSON(sys *types.SystemContext, editor func(auths *dockerConfigFile) (bool, error)) error {
+	path, err := getPathToAuth(sys)
+	if err != nil {
+		return err
+	}
+
+	dir := filepath.Dir(path)
+	if _, err := os.Stat(dir); os.IsNotExist(err) {
+		if err = os.MkdirAll(dir, 0700); err != nil {
+			return errors.Wrapf(err, "error creating directory %q", dir)
+		}
+	}
+
+	auths, err := readJSONFile(path, false)
+	if err != nil {
+		return errors.Wrapf(err, "error reading JSON file %q", path)
+	}
+
+	updated, err := editor(&auths)
+	if err != nil {
+		return errors.Wrapf(err, "error updating %q", path)
+	}
+	if updated {
+		newData, err := json.MarshalIndent(auths, "", "\t")
+		if err != nil {
+			return errors.Wrapf(err, "error marshaling JSON %q", path)
+		}
+
+		// auth.json contains credentials, so keep it readable by the owner only.
+		if err = ioutil.WriteFile(path, newData, 0600); err != nil {
+			return errors.Wrapf(err, "error writing to file %q", path)
+		}
+	}
+
+	return nil
+}
+
+func getAuthFromCredHelper(credHelper, registry string) (string, string, error) {
+	helperName := fmt.Sprintf("docker-credential-%s", credHelper)
+	p := helperclient.NewShellProgramFunc(helperName)
+	creds, err := helperclient.Get(p, registry)
+	if err != nil {
+		return "", "", err
+	}
+	return creds.Username, creds.Secret, nil
+}
+
+func setAuthToCredHelper(credHelper, registry, username, password string) error {
+	helperName := fmt.Sprintf("docker-credential-%s", credHelper)
+	p := helperclient.NewShellProgramFunc(helperName)
+	creds := &credentials.Credentials{
+		ServerURL: registry,
+		Username:  username,
+		Secret:    password,
+	}
+	return helperclient.Store(p, creds)
+}
+
+func deleteAuthFromCredHelper(credHelper, registry string) error {
+	helperName := fmt.Sprintf("docker-credential-%s", credHelper)
+	p := helperclient.NewShellProgramFunc(helperName)
+	return helperclient.Erase(p, registry)
+}
+
+// findAuthentication looks for the authentication data for registry in the file at path.
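+// Credential helpers are consulted first, then the exact registry key, then keys normalized via normalizeRegistry.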
+
+// findAuthentication looks for the authentication of registry in the file at path
+func findAuthentication(registry, path string, legacyFormat bool) (string, string, error) {
+	auths, err := readJSONFile(path, legacyFormat)
+	if err != nil {
+		return "", "", errors.Wrapf(err, "error reading JSON file %q", path)
+	}
+
+	// First try cred helpers. They should always be normalized.
+	if ch, exists := auths.CredHelpers[registry]; exists {
+		return getAuthFromCredHelper(ch, registry)
+	}
+
+	// I'm feeling lucky
+	if val, exists := auths.AuthConfigs[registry]; exists {
+		return decodeDockerAuth(val.Auth)
+	}
+
+	// bad luck; let's normalize the entries first
+	registry = normalizeRegistry(registry)
+	normalizedAuths := map[string]dockerAuthConfig{}
+	for k, v := range auths.AuthConfigs {
+		normalizedAuths[normalizeRegistry(k)] = v
+	}
+	if val, exists := normalizedAuths[registry]; exists {
+		return decodeDockerAuth(val.Auth)
+	}
+	return "", "", nil
+}
+
+func decodeDockerAuth(s string) (string, string, error) {
+	decoded, err := base64.StdEncoding.DecodeString(s)
+	if err != nil {
+		return "", "", err
+	}
+	parts := strings.SplitN(string(decoded), ":", 2)
+	if len(parts) != 2 {
+		// if it's invalid just skip, as docker does
+		return "", "", nil
+	}
+	user := parts[0]
+	password := strings.Trim(parts[1], "\x00")
+	return user, password, nil
+}
+
+// convertToHostname converts a registry URL which has http|https prepended
+// to just a hostname.
+// Copied from github.com/docker/docker/registry/auth.go
+func convertToHostname(url string) string {
+	stripped := url
+	if strings.HasPrefix(url, "http://") {
+		stripped = strings.TrimPrefix(url, "http://")
+	} else if strings.HasPrefix(url, "https://") {
+		stripped = strings.TrimPrefix(url, "https://")
+	}
+
+	nameParts := strings.SplitN(stripped, "/", 2)
+
+	return nameParts[0]
+}
+
+func normalizeRegistry(registry string) string {
+	normalized := convertToHostname(registry)
+	switch normalized {
+	case "registry-1.docker.io", "docker.io":
+		return "index.docker.io"
+	}
+	return normalized
+}
diff --git a/vendor/github.com/containers/image/pkg/strslice/README.md b/vendor/github.com/containers/image/pkg/strslice/README.md
new file mode 100644
index 0000000..ae6097e
--- /dev/null
+++ b/vendor/github.com/containers/image/pkg/strslice/README.md
@@ -0,0 +1 @@
+This package was replicated from [github.com/docker/docker v17.04.0-ce](https://github.com/docker/docker/tree/v17.04.0-ce/api/types/strslice).
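The legacy `auth` field that findAuthentication and decodeDockerAuth consume is nothing more than base64(username:password). A self-contained illustration of that round trip (decodeDockerAuth itself is unexported, so this simply re-creates its logic):

```go
package main

import (
	"encoding/base64"
	"fmt"
	"strings"
)

func main() {
	// What a docker config.json "auth" entry contains:
	auth := base64.StdEncoding.EncodeToString([]byte("alice:s3cret"))
	fmt.Println(auth) // YWxpY2U6czNjcmV0

	// Decoding it the same way decodeDockerAuth does.
	decoded, err := base64.StdEncoding.DecodeString(auth)
	if err != nil {
		panic(err)
	}
	parts := strings.SplitN(string(decoded), ":", 2)
	fmt.Println(parts[0], parts[1]) // alice s3cret
}
```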
diff --git a/vendor/github.com/containers/image/pkg/strslice/strslice.go b/vendor/github.com/containers/image/pkg/strslice/strslice.go
new file mode 100644
index 0000000..bad493f
--- /dev/null
+++ b/vendor/github.com/containers/image/pkg/strslice/strslice.go
@@ -0,0 +1,30 @@
+package strslice
+
+import "encoding/json"
+
+// StrSlice represents a string or an array of strings.
+// We need to override the json decoder to accept both options.
+type StrSlice []string
+
+// UnmarshalJSON decodes the byte slice whether it's a string or an array of
+// strings. This method is needed to implement json.Unmarshaler.
+func (e *StrSlice) UnmarshalJSON(b []byte) error {
+	if len(b) == 0 {
+		// With no input, we preserve the existing value by returning nil and
+		// leaving the target alone. This allows defining default values for
+		// the type.
+		return nil
+	}
+
+	p := make([]string, 0, 1)
+	if err := json.Unmarshal(b, &p); err != nil {
+		var s string
+		if err := json.Unmarshal(b, &s); err != nil {
+			return err
+		}
+		p = append(p, s)
+	}
+
+	*e = p
+	return nil
+}
diff --git a/vendor/github.com/containers/image/pkg/sysregistriesv2/system_registries_v2.go b/vendor/github.com/containers/image/pkg/sysregistriesv2/system_registries_v2.go
new file mode 100644
index 0000000..361e6fc
--- /dev/null
+++ b/vendor/github.com/containers/image/pkg/sysregistriesv2/system_registries_v2.go
@@ -0,0 +1,471 @@
+package sysregistriesv2
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"regexp"
+	"strings"
+	"sync"
+
+	"github.com/BurntSushi/toml"
+	"github.com/containers/image/types"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+
+	"github.com/containers/image/docker/reference"
+)
+
+// systemRegistriesConfPath is the path to the system-wide registry
+// configuration file and is used to add/subtract potential registries for
+// obtaining images. You can override this at build time with
+// -ldflags '-X github.com/containers/image/pkg/sysregistriesv2.systemRegistriesConfPath=$your_path'
+var systemRegistriesConfPath = builtinRegistriesConfPath
+
+// builtinRegistriesConfPath is the path to the registry configuration file.
+// DO NOT change this, instead see systemRegistriesConfPath above.
+const builtinRegistriesConfPath = "/etc/containers/registries.conf"
+
+// Endpoint describes a remote location of a registry.
+type Endpoint struct {
+	// The endpoint's remote location.
+	Location string `toml:"location"`
+	// If true, certificate verification will be skipped and HTTP (non-TLS)
+	// connections will be allowed.
+	Insecure bool `toml:"insecure"`
+}
+
+// rewriteReference substitutes the provided prefix in ref with the endpoint's
+// location and creates a new named reference from the result.
+// The function errors if the newly created reference is not parsable.
+func (e *Endpoint) rewriteReference(ref reference.Named, prefix string) (reference.Named, error) {
+	refString := ref.String()
+	if !refMatchesPrefix(refString, prefix) {
+		return nil, fmt.Errorf("invalid prefix '%v' for reference '%v'", prefix, refString)
+	}
+
+	newNamedRef := strings.Replace(refString, prefix, e.Location, 1)
+	newParsedRef, err := reference.ParseNamed(newNamedRef)
+	if err != nil {
+		return nil, errors.Wrapf(err, "error rewriting reference")
+	}
+	logrus.Debugf("reference rewritten from '%v' to '%v'", refString, newParsedRef.String())
+	return newParsedRef, nil
+}
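At its core, rewriteReference is a single prefix substitution that is then re-validated with reference.ParseNamed. A toy reduction of that step, with illustrative names (the real method also rejects non-matching prefixes and re-parses the result):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// What Endpoint.rewriteReference does internally: swap the matched
	// prefix for the endpoint location, keeping the rest of the name.
	ref := "example.com/foo/myimage:latest"
	prefix := "example.com/foo"
	location := "mirror.local/bar"

	rewritten := strings.Replace(ref, prefix, location, 1)
	fmt.Println(rewritten) // mirror.local/bar/myimage:latest
}
```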
+
+// Registry represents a registry.
+type Registry struct {
+	// A registry is an Endpoint too
+	Endpoint
+	// The registry's mirrors.
+	Mirrors []Endpoint `toml:"mirror"`
+	// If true, pulling from the registry will be blocked.
+	Blocked bool `toml:"blocked"`
+	// If true, mirrors will only be used for digest pulls. Pulling images by
+	// tag can potentially yield different images, depending on which endpoint
+	// we pull from. Forcing digest-pulls for mirrors avoids that issue.
+	MirrorByDigestOnly bool `toml:"mirror-by-digest-only"`
+	// Prefix is used for matching images, and to translate one namespace to
+	// another. If `Prefix="example.com/bar"`, `location="example.com/foo/bar"`
+	// and we pull from "example.com/bar/myimage:latest", the image will
+	// effectively be pulled from "example.com/foo/bar/myimage:latest".
+	// If no Prefix is specified, it defaults to the specified location.
+	Prefix string `toml:"prefix"`
+}
+
+// PullSource consists of an Endpoint and a Reference. Note that the reference is
+// rewritten according to the registry's prefix and the Endpoint's location.
+type PullSource struct {
+	Endpoint  Endpoint
+	Reference reference.Named
+}
+
+// PullSourcesFromReference returns a slice of PullSource values based on the
+// passed reference.
+func (r *Registry) PullSourcesFromReference(ref reference.Named) ([]PullSource, error) {
+	var endpoints []Endpoint
+
+	if r.MirrorByDigestOnly {
+		// Only use mirrors when the reference is a digest one.
+		if _, isDigested := ref.(reference.Canonical); isDigested {
+			endpoints = append(r.Mirrors, r.Endpoint)
+		} else {
+			endpoints = []Endpoint{r.Endpoint}
+		}
+	} else {
+		endpoints = append(r.Mirrors, r.Endpoint)
+	}
+
+	sources := []PullSource{}
+	for _, ep := range endpoints {
+		rewritten, err := ep.rewriteReference(ref, r.Prefix)
+		if err != nil {
+			return nil, err
+		}
+		sources = append(sources, PullSource{Endpoint: ep, Reference: rewritten})
+	}
+
+	return sources, nil
+}
+
+// V1TOMLregistries is for backwards compatibility with sysregistries v1
+type V1TOMLregistries struct {
+	Registries []string `toml:"registries"`
+}
+
+// V1TOMLConfig is for backwards compatibility with sysregistries v1
+type V1TOMLConfig struct {
+	Search   V1TOMLregistries `toml:"search"`
+	Insecure V1TOMLregistries `toml:"insecure"`
+	Block    V1TOMLregistries `toml:"block"`
+}
+
+// V1RegistriesConf is the sysregistries v1 configuration format.
+type V1RegistriesConf struct {
+	V1TOMLConfig `toml:"registries"`
+}
+
+// Nonempty returns true if config contains at least one configuration entry.
+func (config *V1RegistriesConf) Nonempty() bool {
+	return (len(config.V1TOMLConfig.Search.Registries) != 0 ||
+		len(config.V1TOMLConfig.Insecure.Registries) != 0 ||
+		len(config.V1TOMLConfig.Block.Registries) != 0)
+}
+
+// V2RegistriesConf is the sysregistries v2 configuration format.
+type V2RegistriesConf struct {
+	Registries []Registry `toml:"registry"`
+	// An array of host[:port] (not prefix!) entries to use for resolving unqualified image references
+	UnqualifiedSearchRegistries []string `toml:"unqualified-search-registries"`
+}
+
+// Nonempty returns true if config contains at least one configuration entry.
+func (config *V2RegistriesConf) Nonempty() bool {
+	return (len(config.Registries) != 0 ||
+		len(config.UnqualifiedSearchRegistries) != 0)
+}
+
+// tomlConfig is the data type used to unmarshal the toml config.
+type tomlConfig struct {
+	V2RegistriesConf
+	V1RegistriesConf // for backwards compatibility with sysregistries v1
+}
+
+// InvalidRegistries represents an invalid registry configuration. An example
+// is when "registry.com" is defined multiple times in the configuration but
+// with conflicting security settings.
+type InvalidRegistries struct {
+	s string
+}
+
+// Error returns the error string.
+func (e *InvalidRegistries) Error() string {
+	return e.s
+}
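A sketch of how a caller consumes these types; the registry and mirror locations are illustrative. PullSourcesFromReference yields the mirrors first and the primary location last:

```go
package main

import (
	"fmt"
	"log"

	"github.com/containers/image/docker/reference"
	"github.com/containers/image/pkg/sysregistriesv2"
)

func main() {
	reg := sysregistriesv2.Registry{
		Endpoint: sysregistriesv2.Endpoint{Location: "registry.example.com/vendor"},
		Mirrors: []sysregistriesv2.Endpoint{
			{Location: "mirror-0.local/vendor"},
			{Location: "mirror-1.local/vendor", Insecure: true},
		},
		Prefix: "registry.example.com/vendor",
	}

	ref, err := reference.ParseNamed("registry.example.com/vendor/app:latest")
	if err != nil {
		log.Fatal(err)
	}

	sources, err := reg.PullSourcesFromReference(ref)
	if err != nil {
		log.Fatal(err)
	}
	for _, s := range sources {
		// Mirrors first, primary location last.
		fmt.Println(s.Endpoint.Location, "->", s.Reference.String())
	}
}
```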
+
+// parseLocation parses the input string, performs some sanity checks and returns
+// the sanitized input string. An error is returned if the input string is
+// empty or if it contains an "http{s,}://" prefix.
+func parseLocation(input string) (string, error) {
+	trimmed := strings.TrimRight(input, "/")
+
+	if trimmed == "" {
+		return "", &InvalidRegistries{s: "invalid location: cannot be empty"}
+	}
+
+	if strings.HasPrefix(trimmed, "http://") || strings.HasPrefix(trimmed, "https://") {
+		msg := fmt.Sprintf("invalid location '%s': URI schemes are not supported", input)
+		return "", &InvalidRegistries{s: msg}
+	}
+
+	return trimmed, nil
+}
+
+// ConvertToV2 returns a v2 config corresponding to a v1 one.
+func (config *V1RegistriesConf) ConvertToV2() (*V2RegistriesConf, error) {
+	regMap := make(map[string]*Registry)
+	// The order of the registries is not really important, but make it deterministic (the same for the same config file)
+	// to minimize behavior inconsistency and not contribute to difficult-to-reproduce situations.
+	registryOrder := []string{}
+
+	getRegistry := func(location string) (*Registry, error) { // Note: _pointer_ to a long-lived object
+		var err error
+		location, err = parseLocation(location)
+		if err != nil {
+			return nil, err
+		}
+		reg, exists := regMap[location]
+		if !exists {
+			reg = &Registry{
+				Endpoint: Endpoint{Location: location},
+				Mirrors:  []Endpoint{},
+				Prefix:   location,
+			}
+			regMap[location] = reg
+			registryOrder = append(registryOrder, location)
+		}
+		return reg, nil
+	}
+
+	for _, blocked := range config.V1TOMLConfig.Block.Registries {
+		reg, err := getRegistry(blocked)
+		if err != nil {
+			return nil, err
+		}
+		reg.Blocked = true
+	}
+	for _, insecure := range config.V1TOMLConfig.Insecure.Registries {
+		reg, err := getRegistry(insecure)
+		if err != nil {
+			return nil, err
+		}
+		reg.Insecure = true
+	}
+
+	res := &V2RegistriesConf{
+		UnqualifiedSearchRegistries: config.V1TOMLConfig.Search.Registries,
+	}
+	for _, location := range registryOrder {
+		reg := regMap[location]
+		res.Registries = append(res.Registries, *reg)
+	}
+	return res, nil
+}
+
+// anchoredDomainRegexp is an internal implementation detail of postProcess, defining the valid values of elements of UnqualifiedSearchRegistries.
+var anchoredDomainRegexp = regexp.MustCompile("^" + reference.DomainRegexp.String() + "$")
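A sketch of what ConvertToV2 produces for a hand-built v1 configuration; the registry names are illustrative:

```go
package main

import (
	"fmt"
	"log"

	"github.com/containers/image/pkg/sysregistriesv2"
)

func main() {
	v1 := sysregistriesv2.V1RegistriesConf{
		V1TOMLConfig: sysregistriesv2.V1TOMLConfig{
			Search:   sysregistriesv2.V1TOMLregistries{Registries: []string{"docker.io"}},
			Insecure: sysregistriesv2.V1TOMLregistries{Registries: []string{"insecure.local"}},
			Block:    sysregistriesv2.V1TOMLregistries{Registries: []string{"blocked.local"}},
		},
	}

	v2, err := v1.ConvertToV2()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(v2.UnqualifiedSearchRegistries) // [docker.io]
	for _, r := range v2.Registries {
		fmt.Printf("%s insecure=%v blocked=%v\n", r.Location, r.Insecure, r.Blocked)
	}
}
```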
+
+// postProcess checks the consistency of all the configuration, looks for conflicts,
+// and normalizes the configuration (e.g., sets the Prefix to Location if not set).
+func (config *V2RegistriesConf) postProcess() error {
+	regMap := make(map[string][]*Registry)
+
+	for i := range config.Registries {
+		reg := &config.Registries[i]
+		// make sure Location and Prefix are valid
+		var err error
+		reg.Location, err = parseLocation(reg.Location)
+		if err != nil {
+			return err
+		}
+
+		if reg.Prefix == "" {
+			reg.Prefix = reg.Location
+		} else {
+			reg.Prefix, err = parseLocation(reg.Prefix)
+			if err != nil {
+				return err
+			}
+		}
+
+		// make sure mirrors are valid (assign via index so the sanitized value is kept)
+		for j := range reg.Mirrors {
+			reg.Mirrors[j].Location, err = parseLocation(reg.Mirrors[j].Location)
+			if err != nil {
+				return err
+			}
+		}
+		regMap[reg.Location] = append(regMap[reg.Location], reg)
+	}
+
+	// Given a registry can be mentioned multiple times (e.g., to have
+	// multiple prefixes backed by different mirrors), we need to make sure
+	// there are no conflicts among them.
+	//
+	// Note: we need to iterate over the registries array to ensure a
+	// deterministic behavior which is not guaranteed by maps.
+	for _, reg := range config.Registries {
+		others := regMap[reg.Location]
+		for _, other := range others {
+			if reg.Insecure != other.Insecure {
+				msg := fmt.Sprintf("registry '%s' is defined multiple times with conflicting 'insecure' setting", reg.Location)
+				return &InvalidRegistries{s: msg}
+			}
+			if reg.Blocked != other.Blocked {
+				msg := fmt.Sprintf("registry '%s' is defined multiple times with conflicting 'blocked' setting", reg.Location)
+				return &InvalidRegistries{s: msg}
+			}
+		}
+	}
+
+	for i := range config.UnqualifiedSearchRegistries {
+		registry, err := parseLocation(config.UnqualifiedSearchRegistries[i])
+		if err != nil {
+			return err
+		}
+		if !anchoredDomainRegexp.MatchString(registry) {
+			return &InvalidRegistries{fmt.Sprintf("Invalid unqualified-search-registries entry %#v", registry)}
+		}
+		config.UnqualifiedSearchRegistries[i] = registry
+	}
+
+	return nil
+}
+
+// getConfigPath returns the system-registries config path if specified.
+// Otherwise, systemRegistriesConfPath is returned.
+func getConfigPath(ctx *types.SystemContext) string {
+	confPath := systemRegistriesConfPath
+	if ctx != nil {
+		if ctx.SystemRegistriesConfPath != "" {
+			confPath = ctx.SystemRegistriesConfPath
+		} else if ctx.RootForImplicitAbsolutePaths != "" {
+			confPath = filepath.Join(ctx.RootForImplicitAbsolutePaths, systemRegistriesConfPath)
+		}
+	}
+	return confPath
+}
+
+// configMutex is used to synchronize concurrent accesses to configCache.
+var configMutex = sync.Mutex{}
+
+// configCache caches already loaded configs with config paths as keys and is
+// used to avoid redundantly parsing configs. Concurrent accesses to the cache
+// are synchronized via configMutex.
+var configCache = make(map[string]*V2RegistriesConf)
+
+// InvalidateCache invalidates the registry cache. This function is meant to be
+// used for long-running processes that need to reload potential changes made to
+// the cached registry config files.
+func InvalidateCache() {
+	configMutex.Lock()
+	defer configMutex.Unlock()
+	configCache = make(map[string]*V2RegistriesConf)
+}
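Typical use from a long-running process: point the SystemContext at a non-default file and drop the cache when that file may have changed. GetRegistries is defined just below; the path here is illustrative and must exist, since a missing non-default config path is reported as an error:

```go
package main

import (
	"fmt"
	"log"

	"github.com/containers/image/pkg/sysregistriesv2"
	"github.com/containers/image/types"
)

func main() {
	sys := &types.SystemContext{
		SystemRegistriesConfPath: "/tmp/registries.conf", // assumed test path
	}

	regs, err := sysregistriesv2.GetRegistries(sys)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("registries:", len(regs))

	// After an external edit to the file, the cached parse is stale;
	// invalidate and re-read.
	sysregistriesv2.InvalidateCache()
	regs, err = sysregistriesv2.GetRegistries(sys)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("registries after reload:", len(regs))
}
```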
+
+// getConfig returns the config object corresponding to ctx, loading it if it is not yet cached.
+func getConfig(ctx *types.SystemContext) (*V2RegistriesConf, error) {
+	configPath := getConfigPath(ctx)
+
+	configMutex.Lock()
+	defer configMutex.Unlock()
+	// if the config has already been loaded, return the cached registries
+	if config, inCache := configCache[configPath]; inCache {
+		return config, nil
+	}
+
+	// load the config
+	config, err := loadRegistryConf(configPath)
+	if err != nil {
+		// Return an empty []Registry if we use the default config,
+		// which implies that the config path of the SystemContext
+		// isn't set. Note: if ctx.SystemRegistriesConfPath points to
+		// the default config, we will still return an error.
+		if os.IsNotExist(err) && (ctx == nil || ctx.SystemRegistriesConfPath == "") {
+			return &V2RegistriesConf{Registries: []Registry{}}, nil
+		}
+		return nil, err
+	}
+
+	v2Config := &config.V2RegistriesConf
+
+	// backwards compatibility for v1 configs
+	if config.V1RegistriesConf.Nonempty() {
+		if config.V2RegistriesConf.Nonempty() {
+			return nil, &InvalidRegistries{s: "mixing sysregistry v1/v2 is not supported"}
+		}
+		v2, err := config.V1RegistriesConf.ConvertToV2()
+		if err != nil {
+			return nil, err
+		}
+		v2Config = v2
+	}
+
+	if err := v2Config.postProcess(); err != nil {
+		return nil, err
+	}
+
+	// populate the cache
+	configCache[configPath] = v2Config
+	return v2Config, nil
+}
+
+// GetRegistries loads and returns the registries specified in the config.
+// Note the parsed content of registry config files is cached. For reloading,
+// use `InvalidateCache` and re-call `GetRegistries`.
+func GetRegistries(ctx *types.SystemContext) ([]Registry, error) {
+	config, err := getConfig(ctx)
+	if err != nil {
+		return nil, err
+	}
+	return config.Registries, nil
+}
+
+// UnqualifiedSearchRegistries returns a list of host[:port] entries to try
+// for unqualified image search, in the returned order.
+func UnqualifiedSearchRegistries(ctx *types.SystemContext) ([]string, error) {
+	config, err := getConfig(ctx)
+	if err != nil {
+		return nil, err
+	}
+	return config.UnqualifiedSearchRegistries, nil
+}
+
+// refMatchesPrefix returns true iff ref,
+// which is a registry, repository namespace, repository or image reference (as formatted by
+// reference.Domain(), reference.Named.Name() or reference.Reference.String()
+// — note that this requires the name to start with an explicit hostname!),
+// matches a Registry.Prefix value.
+// (This is split from the caller primarily to make testing easier.)
+func refMatchesPrefix(ref, prefix string) bool {
+	switch {
+	case len(ref) < len(prefix):
+		return false
+	case len(ref) == len(prefix):
+		return ref == prefix
+	case len(ref) > len(prefix):
+		if !strings.HasPrefix(ref, prefix) {
+			return false
+		}
+		c := ref[len(prefix)]
+		// This allows "example.com:5000" to match "example.com",
+		// which is unintended; that will get fixed eventually, DON'T RELY
+		// ON THE CURRENT BEHAVIOR.
+		return c == ':' || c == '/' || c == '@'
+	default:
+		panic("Internal error: impossible comparison outcome")
+	}
+}
+
+// FindRegistry returns the Registry with the longest prefix for ref,
+// which is a registry, repository namespace, repository or image reference (as formatted by
+// reference.Domain(), reference.Named.Name() or reference.Reference.String()
+// — note that this requires the name to start with an explicit hostname!).
+// If no Registry prefixes the image, nil is returned.
+func FindRegistry(ctx *types.SystemContext, ref string) (*Registry, error) {
+	config, err := getConfig(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	reg := Registry{}
+	prefixLen := 0
+	for _, r := range config.Registries {
+		if refMatchesPrefix(ref, r.Prefix) {
+			length := len(r.Prefix)
+			if length > prefixLen {
+				reg = r
+				prefixLen = length
+			}
+		}
+	}
+	if prefixLen != 0 {
+		return &reg, nil
+	}
+	return nil, nil
+}
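A sketch of the longest-prefix selection FindRegistry performs, using a throwaway config file; all names are illustrative:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"os"

	"github.com/containers/image/pkg/sysregistriesv2"
	"github.com/containers/image/types"
)

func main() {
	conf := `
[[registry]]
location = "example.com"

[[registry]]
prefix = "example.com/team"
location = "internal.example.net/team"
`
	f, err := ioutil.TempFile("", "registries-*.conf")
	if err != nil {
		log.Fatal(err)
	}
	defer os.Remove(f.Name())
	if _, err := f.WriteString(conf); err != nil {
		log.Fatal(err)
	}
	f.Close()

	sys := &types.SystemContext{SystemRegistriesConfPath: f.Name()}

	// The longer prefix "example.com/team" wins over "example.com".
	reg, err := sysregistriesv2.FindRegistry(sys, "example.com/team/app:latest")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(reg.Prefix, "->", reg.Location)
}
```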
+
+// loadRegistryConf loads the registry configuration file from the filesystem and
+// then unmarshals it. Returns the unmarshalled object.
+func loadRegistryConf(configPath string) (*tomlConfig, error) {
+	config := &tomlConfig{}
+
+	configBytes, err := ioutil.ReadFile(configPath)
+	if err != nil {
+		return nil, err
+	}
+
+	err = toml.Unmarshal(configBytes, &config)
+	return config, err
+}
diff --git a/vendor/github.com/containers/image/pkg/tlsclientconfig/tlsclientconfig.go b/vendor/github.com/containers/image/pkg/tlsclientconfig/tlsclientconfig.go
new file mode 100644
index 0000000..6785564
--- /dev/null
+++ b/vendor/github.com/containers/image/pkg/tlsclientconfig/tlsclientconfig.go
@@ -0,0 +1,112 @@
+package tlsclientconfig
+
+import (
+	"crypto/tls"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"os"
+	"path/filepath"
+	"strings"
+	"time"
+
+	"github.com/docker/go-connections/sockets"
+	"github.com/docker/go-connections/tlsconfig"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+// SetupCertificates opens all .crt, .cert, and .key files in dir and appends / loads certs and key pairs as appropriate to tlsc
+func SetupCertificates(dir string, tlsc *tls.Config) error {
+	logrus.Debugf("Looking for TLS certificates and private keys in %s", dir)
+	fs, err := ioutil.ReadDir(dir)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return nil
+		}
+		if os.IsPermission(err) {
+			logrus.Debugf("Skipping scan of %s due to permission error: %v", dir, err)
+			return nil
+		}
+		return err
+	}
+
+	for _, f := range fs {
+		fullPath := filepath.Join(dir, f.Name())
+		if strings.HasSuffix(f.Name(), ".crt") {
+			logrus.Debugf(" crt: %s", fullPath)
+			data, err := ioutil.ReadFile(fullPath)
+			if err != nil {
+				if os.IsNotExist(err) {
+					// Dangling symbolic link?
+					// Race with someone who deleted the
+					// file after we read the directory's
+					// list of contents?
+					logrus.Warnf("error reading certificate %q: %v", fullPath, err)
+					continue
+				}
+				return err
+			}
+			if tlsc.RootCAs == nil {
+				systemPool, err := tlsconfig.SystemCertPool()
+				if err != nil {
+					return errors.Wrap(err, "unable to get system cert pool")
+				}
+				tlsc.RootCAs = systemPool
+			}
+			tlsc.RootCAs.AppendCertsFromPEM(data)
+		}
+		if strings.HasSuffix(f.Name(), ".cert") {
+			certName := f.Name()
+			keyName := certName[:len(certName)-5] + ".key"
+			logrus.Debugf(" cert: %s", fullPath)
+			if !hasFile(fs, keyName) {
+				return errors.Errorf("missing key %s for client certificate %s. Note that CA certificates should use the extension .crt", keyName, certName)
+			}
+			cert, err := tls.LoadX509KeyPair(filepath.Join(dir, certName), filepath.Join(dir, keyName))
+			if err != nil {
+				return err
+			}
+			tlsc.Certificates = append(tlsc.Certificates, cert)
+		}
+		if strings.HasSuffix(f.Name(), ".key") {
+			keyName := f.Name()
+			certName := keyName[:len(keyName)-4] + ".cert"
+			logrus.Debugf(" key: %s", fullPath)
+			if !hasFile(fs, certName) {
+				return errors.Errorf("missing client certificate %s for key %s", certName, keyName)
+			}
+		}
+	}
+	return nil
+}
+
+func hasFile(files []os.FileInfo, name string) bool {
+	for _, f := range files {
+		if f.Name() == name {
+			return true
+		}
+	}
+	return false
+}
+
+// NewTransport creates a default transport
+func NewTransport() *http.Transport {
+	direct := &net.Dialer{
+		Timeout:   30 * time.Second,
+		KeepAlive: 30 * time.Second,
+		DualStack: true,
+	}
+	tr := &http.Transport{
+		Proxy:               http.ProxyFromEnvironment,
+		Dial:                direct.Dial,
+		TLSHandshakeTimeout: 10 * time.Second,
+		// TODO(dmcgowan): Call close idle connections when complete and use keep alive
+		DisableKeepAlives: true,
+	}
+	proxyDialer, err := sockets.DialerFromEnvironment(direct)
+	if err == nil {
+		tr.Dial = proxyDialer.Dial
+	}
+	return tr
+}
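The two helpers are meant to be composed: build the default transport, layer registry-specific certificates from a certs.d-style directory onto it, and hand it to an http.Client. The directory path is illustrative:

```go
package main

import (
	"crypto/tls"
	"log"
	"net/http"

	"github.com/containers/image/pkg/tlsclientconfig"
)

func main() {
	tr := tlsclientconfig.NewTransport()

	tlsc := &tls.Config{}
	// Loads *.crt into RootCAs and *.cert/*.key pairs as client certificates.
	if err := tlsclientconfig.SetupCertificates("/etc/docker/certs.d/registry.example.com", tlsc); err != nil {
		log.Fatal(err)
	}
	tr.TLSClientConfig = tlsc

	client := &http.Client{Transport: tr}
	_ = client // use as usual, e.g. client.Get("https://registry.example.com/v2/")
}
```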
+# prefix = "example.com/foo" +# +# # If true, unencrypted HTTP as well as TLS connections with untrusted +# # certificates are allowed. +# insecure = false +# +# # If true, pulling images with matching names is forbidden. +# blocked = false +# +# # The physical location of the "prefix"-rooted namespace. +# # +# # By default, this equal to "prefix" (in which case "prefix" can be omitted +# # and the [[registry]] TOML table can only specify "location"). +# # +# # Example: Given +# # prefix = "example.com/foo" +# # location = "internal-registry-for-example.net/bar" +# # requests for the image example.com/foo/myimage:latest will actually work with the +# # internal-registry-for-example.net/bar/myimage:latest image. +# location = internal-registry-for-example.com/bar" +# +# # (Possibly-partial) mirrors for the "prefix"-rooted namespace. +# # +# # The mirrors are attempted in the specified order; the first one that can be +# # contacted and contains the image will be used (and if none of the mirrors contains the image, +# # the primary location specified by the "registry.location" field, or using the unmodified +# # user-specified reference, is tried last). +# # +# # Each TOML table in the "mirror" array can contain the following fields, with the same semantics +# # as if specified in the [[registry]] TOML table directly: +# # - location +# # - insecure +# [[registry.mirror]] +# location = "example-mirror-0.local/mirror-for-foo" +# [[registry.mirror]] +# location = "example-mirror-1.local/mirrors/foo" +# insecure = true +# # Given the above, a pull of example.com/foo/image:latest will try: +# # 1. example-mirror-0.local/mirror-for-foo/image:latest +# # 2. example-mirror-1.local/mirrors/foo/image:latest +# # 3. internal-registry-for-example.net/bar/myimage:latest +# # in order, and use the first one that exists. diff --git a/vendor/github.com/containers/image/signature/docker.go b/vendor/github.com/containers/image/signature/docker.go new file mode 100644 index 0000000..16eb3f7 --- /dev/null +++ b/vendor/github.com/containers/image/signature/docker.go @@ -0,0 +1,65 @@ +// Note: Consider the API unstable until the code supports at least three different image formats or transports. + +package signature + +import ( + "fmt" + + "github.com/containers/image/docker/reference" + "github.com/containers/image/manifest" + "github.com/opencontainers/go-digest" +) + +// SignDockerManifest returns a signature for manifest as the specified dockerReference, +// using mech and keyIdentity. +func SignDockerManifest(m []byte, dockerReference string, mech SigningMechanism, keyIdentity string) ([]byte, error) { + manifestDigest, err := manifest.Digest(m) + if err != nil { + return nil, err + } + sig := newUntrustedSignature(manifestDigest, dockerReference) + return sig.sign(mech, keyIdentity) +} + +// VerifyDockerManifestSignature checks that unverifiedSignature uses expectedKeyIdentity to sign unverifiedManifest as expectedDockerReference, +// using mech. 
+
+// VerifyDockerManifestSignature checks that unverifiedSignature uses expectedKeyIdentity to sign unverifiedManifest as expectedDockerReference,
+// using mech.
+func VerifyDockerManifestSignature(unverifiedSignature, unverifiedManifest []byte,
+	expectedDockerReference string, mech SigningMechanism, expectedKeyIdentity string) (*Signature, error) {
+	expectedRef, err := reference.ParseNormalizedNamed(expectedDockerReference)
+	if err != nil {
+		return nil, err
+	}
+	sig, err := verifyAndExtractSignature(mech, unverifiedSignature, signatureAcceptanceRules{
+		validateKeyIdentity: func(keyIdentity string) error {
+			if keyIdentity != expectedKeyIdentity {
+				return InvalidSignatureError{msg: fmt.Sprintf("Signature by %s does not match expected fingerprint %s", keyIdentity, expectedKeyIdentity)}
+			}
+			return nil
+		},
+		validateSignedDockerReference: func(signedDockerReference string) error {
+			signedRef, err := reference.ParseNormalizedNamed(signedDockerReference)
+			if err != nil {
+				return InvalidSignatureError{msg: fmt.Sprintf("Invalid docker reference %s in signature", signedDockerReference)}
+			}
+			if signedRef.String() != expectedRef.String() {
+				return InvalidSignatureError{msg: fmt.Sprintf("Docker reference %s does not match %s",
+					signedDockerReference, expectedDockerReference)}
+			}
+			return nil
+		},
+		validateSignedDockerManifestDigest: func(signedDockerManifestDigest digest.Digest) error {
+			matches, err := manifest.MatchesDigest(unverifiedManifest, signedDockerManifestDigest)
+			if err != nil {
+				return err
+			}
+			if !matches {
+				return InvalidSignatureError{msg: fmt.Sprintf("Signature for docker digest %q does not match", signedDockerManifestDigest)}
+			}
+			return nil
+		},
+	})
+	if err != nil {
+		return nil, err
+	}
+	return sig, nil
+}
diff --git a/vendor/github.com/containers/image/signature/json.go b/vendor/github.com/containers/image/signature/json.go
new file mode 100644
index 0000000..9e59286
--- /dev/null
+++ b/vendor/github.com/containers/image/signature/json.go
@@ -0,0 +1,88 @@
+package signature
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+)
+
+// jsonFormatError is returned when JSON does not match expected format.
+type jsonFormatError string
+
+func (err jsonFormatError) Error() string {
+	return string(err)
+}
+
+// paranoidUnmarshalJSONObject unmarshals data as a JSON object, and fails on the slightest unexpected aspect
+// (including duplicated keys, unrecognized keys, and non-matching types). Uses fieldResolver to
+// determine the destination for a field value, which should return a pointer to the destination if valid, or nil if the key is rejected.
+//
+// The fieldResolver approach is useful for decoding the Policy.Transports map; using it for structs is a bit lazy,
+// we could use reflection to automate this. Later?
+func paranoidUnmarshalJSONObject(data []byte, fieldResolver func(string) interface{}) error {
+	seenKeys := map[string]struct{}{}
+
+	dec := json.NewDecoder(bytes.NewReader(data))
+	t, err := dec.Token()
+	if err != nil {
+		return jsonFormatError(err.Error())
+	}
+	if t != json.Delim('{') {
+		return jsonFormatError(fmt.Sprintf("JSON object expected, got \"%s\"", t))
+	}
+	for {
+		t, err := dec.Token()
+		if err != nil {
+			return jsonFormatError(err.Error())
+		}
+		if t == json.Delim('}') {
+			break
+		}
+
+		key, ok := t.(string)
+		if !ok {
+			// Coverage: This should never happen, dec.Token() rejects non-string-literals in this state.
+			return jsonFormatError(fmt.Sprintf("Key string literal expected, got \"%s\"", t))
+		}
+		if _, ok := seenKeys[key]; ok {
+			return jsonFormatError(fmt.Sprintf("Duplicate key \"%s\"", key))
+		}
+		seenKeys[key] = struct{}{}
+
+		valuePtr := fieldResolver(key)
+		if valuePtr == nil {
+			return jsonFormatError(fmt.Sprintf("Unknown key \"%s\"", key))
+		}
+		// This works like json.Unmarshal, in particular it allows us to implement UnmarshalJSON to implement strict parsing of the field value.
+		if err := dec.Decode(valuePtr); err != nil {
+			return jsonFormatError(err.Error())
+		}
+	}
+	if _, err := dec.Token(); err != io.EOF {
+		return jsonFormatError("Unexpected data after JSON object")
+	}
+	return nil
+}
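Both parsers are unexported, so they can only be exercised from inside the package. A test-style sketch, as it would look in a file within package signature, of the exact-fields variant defined just below; the payload is illustrative:

```go
package signature

import "testing"

func TestParanoidExactFields(t *testing.T) {
	var parsed struct {
		Type    string
		KeyPath string
	}
	// All expected keys present exactly once: accepted.
	err := paranoidUnmarshalJSONObjectExactFields(
		[]byte(`{"type": "signedBy", "keyPath": "/etc/pki/key.gpg"}`),
		map[string]interface{}{
			"type":    &parsed.Type,
			"keyPath": &parsed.KeyPath,
		},
	)
	if err != nil {
		t.Fatal(err)
	}

	// Unknown keys are rejected outright, unlike plain json.Unmarshal.
	if err := paranoidUnmarshalJSONObjectExactFields(
		[]byte(`{"type": "signedBy", "extra": 1}`),
		map[string]interface{}{"type": &parsed.Type},
	); err == nil {
		t.Fatal("expected unknown-key error")
	}
}
```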
+
+// paranoidUnmarshalJSONObjectExactFields unmarshals data as a JSON object, and fails on the slightest unexpected aspect
+// (including duplicated keys, unrecognized keys, and non-matching types). Each of the fields in exactFields
+// must be present exactly once, and no other fields are accepted.
+func paranoidUnmarshalJSONObjectExactFields(data []byte, exactFields map[string]interface{}) error {
+	seenKeys := map[string]struct{}{}
+	if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
+		if valuePtr, ok := exactFields[key]; ok {
+			seenKeys[key] = struct{}{}
+			return valuePtr
+		}
+		return nil
+	}); err != nil {
+		return err
+	}
+	for key := range exactFields {
+		if _, ok := seenKeys[key]; !ok {
+			return jsonFormatError(fmt.Sprintf(`Key "%s" missing in a JSON object`, key))
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/containers/image/signature/mechanism.go b/vendor/github.com/containers/image/signature/mechanism.go
new file mode 100644
index 0000000..bdf26c5
--- /dev/null
+++ b/vendor/github.com/containers/image/signature/mechanism.go
@@ -0,0 +1,85 @@
+// Note: Consider the API unstable until the code supports at least three different image formats or transports.
+
+package signature
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"strings"
+
+	"golang.org/x/crypto/openpgp"
+)
+
+// SigningMechanism abstracts a way to sign binary blobs and verify their signatures.
+// Each mechanism should eventually be closed by calling Close().
+// FIXME: Eventually expand on keyIdentity (namespace them between mechanisms to
+// eliminate ambiguities, support CA signatures and perhaps other key properties)
+type SigningMechanism interface {
+	// Close removes resources associated with the mechanism, if any.
+	Close() error
+	// SupportsSigning returns nil if the mechanism supports signing, or a SigningNotSupportedError.
+	SupportsSigning() error
+	// Sign creates a (non-detached) signature of input using keyIdentity.
+	// Fails with a SigningNotSupportedError if the mechanism does not support signing.
+	Sign(input []byte, keyIdentity string) ([]byte, error)
+	// Verify parses unverifiedSignature and returns the content and the signer's identity
+	Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error)
+	// UntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION,
+	// along with a short identifier of the key used for signing.
+	// WARNING: The short key identifier (which corresponds to "Key ID" for OpenPGP keys)
+	// is NOT the same as a "key identity" used in other calls to this interface, and
+	// the values may have no recognizable relationship if the public key is not available.
+	UntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error)
+}
+
+// SigningNotSupportedError is returned when trying to sign using a mechanism which does not support that.
+type SigningNotSupportedError string
+
+func (err SigningNotSupportedError) Error() string {
+	return string(err)
+}
+
+// NewGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism for the user’s default
+// GPG configuration ($GNUPGHOME / ~/.gnupg)
+// The caller must call .Close() on the returned SigningMechanism.
+func NewGPGSigningMechanism() (SigningMechanism, error) {
+	return newGPGSigningMechanismInDirectory("")
+}
+
+// NewEphemeralGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism which
+// recognizes _only_ public keys from the supplied blob, and returns the identities
+// of these keys.
+// The caller must call .Close() on the returned SigningMechanism.
+func NewEphemeralGPGSigningMechanism(blob []byte) (SigningMechanism, []string, error) {
+	return newEphemeralGPGSigningMechanism(blob)
+}
+
+// gpgUntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION,
+// along with a short identifier of the key used for signing.
+// WARNING: The short key identifier (which corresponds to "Key ID" for OpenPGP keys)
+// is NOT the same as a "key identity" used in other calls to this interface, and
+// the values may have no recognizable relationship if the public key is not available.
+func gpgUntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) {
+	// This uses the Golang-native OpenPGP implementation instead of gpgme because we are not doing any cryptography.
+	md, err := openpgp.ReadMessage(bytes.NewReader(untrustedSignature), openpgp.EntityList{}, nil, nil)
+	if err != nil {
+		return nil, "", err
+	}
+	if !md.IsSigned {
+		return nil, "", errors.New("The input is not a signature")
+	}
+	content, err := ioutil.ReadAll(md.UnverifiedBody)
+	if err != nil {
+		// Coverage: An error during reading the body can happen only if
+		// 1) the message is encrypted, which is not our case (and we don’t give ReadMessage the key
+		// to decrypt the contents anyway), or
+		// 2) the message is signed AND we give ReadMessage a corresponding public key, which we don’t.
+		return nil, "", err
+	}
+
+	// Uppercase the key ID for minimal consistency with the gpgme-returned fingerprints
+	// (but note that key ID is a suffix of the fingerprint only for V4 keys, not V3)!
+	return content, strings.ToUpper(fmt.Sprintf("%016X", md.SignedByKeyId)), nil
+}
diff --git a/vendor/github.com/containers/image/signature/mechanism_gpgme.go b/vendor/github.com/containers/image/signature/mechanism_gpgme.go
new file mode 100644
index 0000000..4825ab2
--- /dev/null
+++ b/vendor/github.com/containers/image/signature/mechanism_gpgme.go
@@ -0,0 +1,175 @@
+// +build !containers_image_openpgp
+
+package signature
+
+import (
+	"bytes"
+	"fmt"
+	"io/ioutil"
+	"os"
+
+	"github.com/mtrmac/gpgme"
+)
+
+// A GPG/OpenPGP signing mechanism, implemented using gpgme.
+type gpgmeSigningMechanism struct {
+	ctx          *gpgme.Context
+	ephemeralDir string // If not "", a directory to be removed on Close()
+}
+
+// newGPGSigningMechanismInDirectory returns a new GPG/OpenPGP signing mechanism, using optionalDir if not empty.
+// The caller must call .Close() on the returned SigningMechanism.
+func newGPGSigningMechanismInDirectory(optionalDir string) (SigningMechanism, error) { + ctx, err := newGPGMEContext(optionalDir) + if err != nil { + return nil, err + } + return &gpgmeSigningMechanism{ + ctx: ctx, + ephemeralDir: "", + }, nil +} + +// newEphemeralGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism which +// recognizes _only_ public keys from the supplied blob, and returns the identities +// of these keys. +// The caller must call .Close() on the returned SigningMechanism. +func newEphemeralGPGSigningMechanism(blob []byte) (SigningMechanism, []string, error) { + dir, err := ioutil.TempDir("", "containers-ephemeral-gpg-") + if err != nil { + return nil, nil, err + } + removeDir := true + defer func() { + if removeDir { + os.RemoveAll(dir) + } + }() + ctx, err := newGPGMEContext(dir) + if err != nil { + return nil, nil, err + } + mech := &gpgmeSigningMechanism{ + ctx: ctx, + ephemeralDir: dir, + } + keyIdentities, err := mech.importKeysFromBytes(blob) + if err != nil { + return nil, nil, err + } + + removeDir = false + return mech, keyIdentities, nil +} + +// newGPGMEContext returns a new *gpgme.Context, using optionalDir if not empty. +func newGPGMEContext(optionalDir string) (*gpgme.Context, error) { + ctx, err := gpgme.New() + if err != nil { + return nil, err + } + if err = ctx.SetProtocol(gpgme.ProtocolOpenPGP); err != nil { + return nil, err + } + if optionalDir != "" { + err := ctx.SetEngineInfo(gpgme.ProtocolOpenPGP, "", optionalDir) + if err != nil { + return nil, err + } + } + ctx.SetArmor(false) + ctx.SetTextMode(false) + return ctx, nil +} + +func (m *gpgmeSigningMechanism) Close() error { + if m.ephemeralDir != "" { + os.RemoveAll(m.ephemeralDir) // Ignore an error, if any + } + return nil +} + +// importKeysFromBytes imports public keys from the supplied blob and returns their identities. +// The blob is assumed to have an appropriate format (the caller is expected to know which one). +// NOTE: This may modify long-term state (e.g. key storage in a directory underlying the mechanism); +// but we do not make this public, it can only be used through newEphemeralGPGSigningMechanism. +func (m *gpgmeSigningMechanism) importKeysFromBytes(blob []byte) ([]string, error) { + inputData, err := gpgme.NewDataBytes(blob) + if err != nil { + return nil, err + } + res, err := m.ctx.Import(inputData) + if err != nil { + return nil, err + } + keyIdentities := []string{} + for _, i := range res.Imports { + if i.Result == nil { + keyIdentities = append(keyIdentities, i.Fingerprint) + } + } + return keyIdentities, nil +} + +// SupportsSigning returns nil if the mechanism supports signing, or a SigningNotSupportedError. +func (m *gpgmeSigningMechanism) SupportsSigning() error { + return nil +} + +// Sign creates a (non-detached) signature of input using keyIdentity. +// Fails with a SigningNotSupportedError if the mechanism does not support signing. 
+func (m *gpgmeSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) {
+	key, err := m.ctx.GetKey(keyIdentity, true)
+	if err != nil {
+		return nil, err
+	}
+	inputData, err := gpgme.NewDataBytes(input)
+	if err != nil {
+		return nil, err
+	}
+	var sigBuffer bytes.Buffer
+	sigData, err := gpgme.NewDataWriter(&sigBuffer)
+	if err != nil {
+		return nil, err
+	}
+	if err = m.ctx.Sign([]*gpgme.Key{key}, inputData, sigData, gpgme.SigModeNormal); err != nil {
+		return nil, err
+	}
+	return sigBuffer.Bytes(), nil
+}
+
+// Verify parses unverifiedSignature and returns the content and the signer's identity
+func (m gpgmeSigningMechanism) Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error) {
+	signedBuffer := bytes.Buffer{}
+	signedData, err := gpgme.NewDataWriter(&signedBuffer)
+	if err != nil {
+		return nil, "", err
+	}
+	unverifiedSignatureData, err := gpgme.NewDataBytes(unverifiedSignature)
+	if err != nil {
+		return nil, "", err
+	}
+	_, sigs, err := m.ctx.Verify(unverifiedSignatureData, nil, signedData)
+	if err != nil {
+		return nil, "", err
+	}
+	if len(sigs) != 1 {
+		return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Unexpected GPG signature count %d", len(sigs))}
+	}
+	sig := sigs[0]
+	// This is sig.Summary == gpgme.SigSumValid except for key trust, which we handle ourselves
+	if sig.Status != nil || sig.Validity == gpgme.ValidityNever || sig.ValidityReason != nil || sig.WrongKeyUsage {
+		// FIXME: Better error reporting eventually
+		return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Invalid GPG signature: %#v", sig)}
+	}
+	return signedBuffer.Bytes(), sig.Fingerprint, nil
+}
+
+// UntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION,
+// along with a short identifier of the key used for signing.
+// WARNING: The short key identifier (which corresponds to "Key ID" for OpenPGP keys)
+// is NOT the same as a "key identity" used in other calls to this interface, and
+// the values may have no recognizable relationship if the public key is not available.
+func (m gpgmeSigningMechanism) UntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) {
+	return gpgUntrustedSignatureContents(untrustedSignature)
+}
diff --git a/vendor/github.com/containers/image/signature/mechanism_openpgp.go b/vendor/github.com/containers/image/signature/mechanism_openpgp.go
new file mode 100644
index 0000000..eccd610
--- /dev/null
+++ b/vendor/github.com/containers/image/signature/mechanism_openpgp.go
@@ -0,0 +1,159 @@
+// +build containers_image_openpgp
+
+package signature
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path"
+	"strings"
+	"time"
+
+	"github.com/containers/storage/pkg/homedir"
+	"golang.org/x/crypto/openpgp"
+)
+
+// A GPG/OpenPGP signing mechanism, implemented using x/crypto/openpgp.
+type openpgpSigningMechanism struct {
+	keyring openpgp.EntityList
+}
+
+// newGPGSigningMechanismInDirectory returns a new GPG/OpenPGP signing mechanism, using optionalDir if not empty.
+// The caller must call .Close() on the returned SigningMechanism.
+func newGPGSigningMechanismInDirectory(optionalDir string) (SigningMechanism, error) { + m := &openpgpSigningMechanism{ + keyring: openpgp.EntityList{}, + } + + gpgHome := optionalDir + if gpgHome == "" { + gpgHome = os.Getenv("GNUPGHOME") + if gpgHome == "" { + gpgHome = path.Join(homedir.Get(), ".gnupg") + } + } + + pubring, err := ioutil.ReadFile(path.Join(gpgHome, "pubring.gpg")) + if err != nil { + if !os.IsNotExist(err) { + return nil, err + } + } else { + _, err := m.importKeysFromBytes(pubring) + if err != nil { + return nil, err + } + } + return m, nil +} + +// newEphemeralGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism which +// recognizes _only_ public keys from the supplied blob, and returns the identities +// of these keys. +// The caller must call .Close() on the returned SigningMechanism. +func newEphemeralGPGSigningMechanism(blob []byte) (SigningMechanism, []string, error) { + m := &openpgpSigningMechanism{ + keyring: openpgp.EntityList{}, + } + keyIdentities, err := m.importKeysFromBytes(blob) + if err != nil { + return nil, nil, err + } + return m, keyIdentities, nil +} + +func (m *openpgpSigningMechanism) Close() error { + return nil +} + +// importKeysFromBytes imports public keys from the supplied blob and returns their identities. +// The blob is assumed to have an appropriate format (the caller is expected to know which one). +func (m *openpgpSigningMechanism) importKeysFromBytes(blob []byte) ([]string, error) { + keyring, err := openpgp.ReadKeyRing(bytes.NewReader(blob)) + if err != nil { + k, e2 := openpgp.ReadArmoredKeyRing(bytes.NewReader(blob)) + if e2 != nil { + return nil, err // The original error -- FIXME: is this better? + } + keyring = k + } + + keyIdentities := []string{} + for _, entity := range keyring { + if entity.PrimaryKey == nil { + // Coverage: This should never happen, openpgp.ReadEntity fails with a + // openpgp.errors.StructuralError instead of returning an entity with this + // field set to nil. + continue + } + // Uppercase the fingerprint to be compatible with gpgme + keyIdentities = append(keyIdentities, strings.ToUpper(fmt.Sprintf("%x", entity.PrimaryKey.Fingerprint))) + m.keyring = append(m.keyring, entity) + } + return keyIdentities, nil +} + +// SupportsSigning returns nil if the mechanism supports signing, or a SigningNotSupportedError. +func (m *openpgpSigningMechanism) SupportsSigning() error { + return SigningNotSupportedError("signing is not supported in github.com/containers/image built with the containers_image_openpgp build tag") +} + +// Sign creates a (non-detached) signature of input using keyIdentity. +// Fails with a SigningNotSupportedError if the mechanism does not support signing. 
+func (m *openpgpSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) {
+	return nil, SigningNotSupportedError("signing is not supported in github.com/containers/image built with the containers_image_openpgp build tag")
+}
+
+// Verify parses unverifiedSignature and returns the content and the signer's identity
+func (m *openpgpSigningMechanism) Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error) {
+	md, err := openpgp.ReadMessage(bytes.NewReader(unverifiedSignature), m.keyring, nil, nil)
+	if err != nil {
+		return nil, "", err
+	}
+	if !md.IsSigned {
+		return nil, "", errors.New("not signed")
+	}
+	content, err := ioutil.ReadAll(md.UnverifiedBody)
+	if err != nil {
+		// Coverage: md.UnverifiedBody.Read only fails if the body is encrypted
+		// (and possibly also signed, but it _must_ be encrypted) and the signing
+		// “modification detection code” detects a mismatch. But in that case,
+		// we would expect the signature verification to fail as well, and that is checked
+		// first. Besides, we are not supplying any decryption keys, so we really
+		// can never reach this “encrypted data MDC mismatch” path.
+		return nil, "", err
+	}
+	if md.SignatureError != nil {
+		return nil, "", fmt.Errorf("signature error: %v", md.SignatureError)
+	}
+	if md.SignedBy == nil {
+		return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Invalid GPG signature: %#v", md.Signature)}
+	}
+	if md.Signature != nil {
+		if md.Signature.SigLifetimeSecs != nil {
+			expiry := md.Signature.CreationTime.Add(time.Duration(*md.Signature.SigLifetimeSecs) * time.Second)
+			if time.Now().After(expiry) {
+				return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Signature expired on %s", expiry)}
+			}
+		}
+	} else if md.SignatureV3 == nil {
+		// Coverage: If md.SignedBy != nil, the final md.UnverifiedBody.Read() either sets one of md.Signature or md.SignatureV3,
+		// or sets md.SignatureError.
+		return nil, "", InvalidSignatureError{msg: "Unexpected openpgp.MessageDetails: neither Signature nor SignatureV3 is set"}
+	}
+
+	// Uppercase the fingerprint to be compatible with gpgme
+	return content, strings.ToUpper(fmt.Sprintf("%x", md.SignedBy.PublicKey.Fingerprint)), nil
+}
+
+// UntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION,
+// along with a short identifier of the key used for signing.
+// WARNING: The short key identifier (which corresponds to "Key ID" for OpenPGP keys)
+// is NOT the same as a "key identity" used in other calls to this interface, and
+// the values may have no recognizable relationship if the public key is not available.
+func (m openpgpSigningMechanism) UntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) {
+	return gpgUntrustedSignatureContents(untrustedSignature)
+}
diff --git a/vendor/github.com/containers/image/signature/policy_config.go b/vendor/github.com/containers/image/signature/policy_config.go
new file mode 100644
index 0000000..12398e3
--- /dev/null
+++ b/vendor/github.com/containers/image/signature/policy_config.go
@@ -0,0 +1,688 @@
+// policy_config.go handles creation of policy objects, either by parsing JSON
+// or by programs building them programmatically.
+
+// The New* constructors are intended to be a stable API. FIXME: after an independent review.
+
+// Do not invoke the internals of the JSON marshaling/unmarshaling directly.
+
+// We can't just blindly call json.Unmarshal because that would silently ignore
+// typos, and that would just not do for security policy.
+
+// FIXME? This is by no means a user-friendly parser: no location information in error messages, no other context.
+// But at least it is not worse than blind json.Unmarshal()…
+
+package signature
+
+import (
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"path/filepath"
+
+	"github.com/containers/image/docker/reference"
+	"github.com/containers/image/transports"
+	"github.com/containers/image/types"
+	"github.com/pkg/errors"
+)
+
+// systemDefaultPolicyPath is the policy path used for DefaultPolicy().
+// You can override this at build time with
+// -ldflags '-X github.com/containers/image/signature.systemDefaultPolicyPath=$your_path'
+var systemDefaultPolicyPath = builtinDefaultPolicyPath
+
+// builtinDefaultPolicyPath is the policy path used for DefaultPolicy().
+// DO NOT change this, instead see systemDefaultPolicyPath above.
+const builtinDefaultPolicyPath = "/etc/containers/policy.json"
+
+// InvalidPolicyFormatError is returned when parsing an invalid policy configuration.
+type InvalidPolicyFormatError string
+
+func (err InvalidPolicyFormatError) Error() string {
+	return string(err)
+}
+
+// DefaultPolicy returns the default policy of the system.
+// Most applications should be using this method to get the policy configured
+// by the system administrator.
+// sys should usually be nil, can be set to override the default.
+// NOTE: When this function returns an error, report it to the user and abort.
+// DO NOT hard-code fallback policies in your application.
+func DefaultPolicy(sys *types.SystemContext) (*Policy, error) {
+	return NewPolicyFromFile(defaultPolicyPath(sys))
+}
+
+// defaultPolicyPath returns a path to the default policy of the system.
+func defaultPolicyPath(sys *types.SystemContext) string {
+	if sys != nil {
+		if sys.SignaturePolicyPath != "" {
+			return sys.SignaturePolicyPath
+		}
+		if sys.RootForImplicitAbsolutePaths != "" {
+			return filepath.Join(sys.RootForImplicitAbsolutePaths, systemDefaultPolicyPath)
+		}
+	}
+	return systemDefaultPolicyPath
+}
+
+// NewPolicyFromFile returns a policy configured in the specified file.
+func NewPolicyFromFile(fileName string) (*Policy, error) {
+	contents, err := ioutil.ReadFile(fileName)
+	if err != nil {
+		return nil, err
+	}
+	policy, err := NewPolicyFromBytes(contents)
+	if err != nil {
+		return nil, errors.Wrapf(err, "invalid policy in %q", fileName)
+	}
+	return policy, nil
+}
+
+// NewPolicyFromBytes returns a policy parsed from the specified blob.
+// Use this function instead of calling json.Unmarshal directly.
+func NewPolicyFromBytes(data []byte) (*Policy, error) {
+	p := Policy{}
+	if err := json.Unmarshal(data, &p); err != nil {
+		return nil, InvalidPolicyFormatError(err.Error())
+	}
+	return &p, nil
+}
+
+// Compile-time check that Policy implements json.Unmarshaler.
+var _ json.Unmarshaler = (*Policy)(nil)
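A sketch of loading a policy through the stable constructors rather than raw json.Unmarshal; a minimal reject-by-default policy with one permissive docker scope, all values illustrative:

```go
package main

import (
	"fmt"
	"log"

	"github.com/containers/image/signature"
)

func main() {
	policyJSON := []byte(`{
		"default": [{"type": "reject"}],
		"transports": {
			"docker": {
				"docker.io/library": [{"type": "insecureAcceptAnything"}]
			}
		}
	}`)

	policy, err := signature.NewPolicyFromBytes(policyJSON)
	if err != nil {
		log.Fatal(err) // an InvalidPolicyFormatError on any malformed input
	}
	fmt.Println("docker transport scopes:", len(policy.Transports["docker"]))
}
```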
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (p *Policy) UnmarshalJSON(data []byte) error {
+	*p = Policy{}
+	transports := policyTransportsMap{}
+	if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
+		switch key {
+		case "default":
+			return &p.Default
+		case "transports":
+			return &transports
+		default:
+			return nil
+		}
+	}); err != nil {
+		return err
+	}
+
+	if p.Default == nil {
+		return InvalidPolicyFormatError("Default policy is missing")
+	}
+	p.Transports = map[string]PolicyTransportScopes(transports)
+	return nil
+}
+
+// policyTransportsMap is a specialization of this map type for the strict JSON parsing semantics appropriate for the Policy.Transports member.
+type policyTransportsMap map[string]PolicyTransportScopes
+
+// Compile-time check that policyTransportsMap implements json.Unmarshaler.
+var _ json.Unmarshaler = (*policyTransportsMap)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (m *policyTransportsMap) UnmarshalJSON(data []byte) error {
+	// We can't unmarshal directly into map values because it is not possible to take an address of a map value.
+	// So, use a temporary map of pointers and convert.
+	tmpMap := map[string]*PolicyTransportScopes{}
+	if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
+		// transport can be nil
+		transport := transports.Get(key)
+		// paranoidUnmarshalJSONObject detects key duplication for us, check just to be safe.
+		if _, ok := tmpMap[key]; ok {
+			return nil
+		}
+		ptsWithTransport := policyTransportScopesWithTransport{
+			transport: transport,
+			dest:      &PolicyTransportScopes{}, // This allocates a new instance on each call.
+		}
+		tmpMap[key] = ptsWithTransport.dest
+		return &ptsWithTransport
+	}); err != nil {
+		return err
+	}
+	for key, ptr := range tmpMap {
+		(*m)[key] = *ptr
+	}
+	return nil
+}
+
+// Compile-time check that PolicyTransportScopes "implements" json.Unmarshaler.
+// We want to only use policyTransportScopesWithTransport.
+var _ json.Unmarshaler = (*PolicyTransportScopes)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (m *PolicyTransportScopes) UnmarshalJSON(data []byte) error {
+	return errors.New("Do not try to unmarshal PolicyTransportScopes directly")
+}
+
+// policyTransportScopesWithTransport is a way to unmarshal a PolicyTransportScopes
+// while validating using a specific ImageTransport if not nil.
+type policyTransportScopesWithTransport struct {
+	transport types.ImageTransport
+	dest      *PolicyTransportScopes
+}
+
+// Compile-time check that policyTransportScopesWithTransport implements json.Unmarshaler.
+var _ json.Unmarshaler = (*policyTransportScopesWithTransport)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (m *policyTransportScopesWithTransport) UnmarshalJSON(data []byte) error {
+	// We can't unmarshal directly into map values because it is not possible to take an address of a map value.
+	// So, use a temporary map of pointers-to-slices and convert.
+	tmpMap := map[string]*PolicyRequirements{}
+	if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
+		// paranoidUnmarshalJSONObject detects key duplication for us, check just to be safe.
+		if _, ok := tmpMap[key]; ok {
+			return nil
+		}
+		if key != "" && m.transport != nil {
+			if err := m.transport.ValidatePolicyConfigurationScope(key); err != nil {
+				return nil
+			}
+		}
+		ptr := &PolicyRequirements{} // This allocates a new instance on each call.
+ tmpMap[key] = ptr + return ptr + }); err != nil { + return err + } + for key, ptr := range tmpMap { + (*m.dest)[key] = *ptr + } + return nil +} + +// Compile-time check that PolicyRequirements implements json.Unmarshaler. +var _ json.Unmarshaler = (*PolicyRequirements)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (m *PolicyRequirements) UnmarshalJSON(data []byte) error { + reqJSONs := []json.RawMessage{} + if err := json.Unmarshal(data, &reqJSONs); err != nil { + return err + } + if len(reqJSONs) == 0 { + return InvalidPolicyFormatError("List of verification policy requirements must not be empty") + } + res := make([]PolicyRequirement, len(reqJSONs)) + for i, reqJSON := range reqJSONs { + req, err := newPolicyRequirementFromJSON(reqJSON) + if err != nil { + return err + } + res[i] = req + } + *m = res + return nil +} + +// newPolicyRequirementFromJSON parses JSON data into a PolicyRequirement implementation. +func newPolicyRequirementFromJSON(data []byte) (PolicyRequirement, error) { + var typeField prCommon + if err := json.Unmarshal(data, &typeField); err != nil { + return nil, err + } + var res PolicyRequirement + switch typeField.Type { + case prTypeInsecureAcceptAnything: + res = &prInsecureAcceptAnything{} + case prTypeReject: + res = &prReject{} + case prTypeSignedBy: + res = &prSignedBy{} + case prTypeSignedBaseLayer: + res = &prSignedBaseLayer{} + default: + return nil, InvalidPolicyFormatError(fmt.Sprintf("Unknown policy requirement type \"%s\"", typeField.Type)) + } + if err := json.Unmarshal(data, &res); err != nil { + return nil, err + } + return res, nil +} + +// newPRInsecureAcceptAnything is NewPRInsecureAcceptAnything, except it returns the private type. +func newPRInsecureAcceptAnything() *prInsecureAcceptAnything { + return &prInsecureAcceptAnything{prCommon{Type: prTypeInsecureAcceptAnything}} +} + +// NewPRInsecureAcceptAnything returns a new "insecureAcceptAnything" PolicyRequirement. +func NewPRInsecureAcceptAnything() PolicyRequirement { + return newPRInsecureAcceptAnything() +} + +// Compile-time check that prInsecureAcceptAnything implements json.Unmarshaler. +var _ json.Unmarshaler = (*prInsecureAcceptAnything)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (pr *prInsecureAcceptAnything) UnmarshalJSON(data []byte) error { + *pr = prInsecureAcceptAnything{} + var tmp prInsecureAcceptAnything + if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ + "type": &tmp.Type, + }); err != nil { + return err + } + + if tmp.Type != prTypeInsecureAcceptAnything { + return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type)) + } + *pr = *newPRInsecureAcceptAnything() + return nil +} + +// newPRReject is NewPRReject, except it returns the private type. +func newPRReject() *prReject { + return &prReject{prCommon{Type: prTypeReject}} +} + +// NewPRReject returns a new "reject" PolicyRequirement. +func NewPRReject() PolicyRequirement { + return newPRReject() +} + +// Compile-time check that prReject implements json.Unmarshaler. +var _ json.Unmarshaler = (*prReject)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. 
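+// In policy JSON, a "reject" requirement is written simply as {"type": "reject"};
+// the strict parser below refuses extra fields or any other type value (editor's note).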
+func (pr *prReject) UnmarshalJSON(data []byte) error {
+	*pr = prReject{}
+	var tmp prReject
+	if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+		"type": &tmp.Type,
+	}); err != nil {
+		return err
+	}
+
+	if tmp.Type != prTypeReject {
+		return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+	}
+	*pr = *newPRReject()
+	return nil
+}
+
+// newPRSignedBy returns a new prSignedBy if parameters are valid.
+func newPRSignedBy(keyType sbKeyType, keyPath string, keyData []byte, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) {
+	if !keyType.IsValid() {
+		return nil, InvalidPolicyFormatError(fmt.Sprintf("invalid keyType \"%s\"", keyType))
+	}
+	if len(keyPath) > 0 && len(keyData) > 0 {
+		return nil, InvalidPolicyFormatError("keyPath and keyData cannot be used simultaneously")
+	}
+	if signedIdentity == nil {
+		return nil, InvalidPolicyFormatError("signedIdentity not specified")
+	}
+	return &prSignedBy{
+		prCommon:       prCommon{Type: prTypeSignedBy},
+		KeyType:        keyType,
+		KeyPath:        keyPath,
+		KeyData:        keyData,
+		SignedIdentity: signedIdentity,
+	}, nil
+}
+
+// newPRSignedByKeyPath is NewPRSignedByKeyPath, except it returns the private type.
+func newPRSignedByKeyPath(keyType sbKeyType, keyPath string, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) {
+	return newPRSignedBy(keyType, keyPath, nil, signedIdentity)
+}
+
+// NewPRSignedByKeyPath returns a new "signedBy" PolicyRequirement using a KeyPath
+func NewPRSignedByKeyPath(keyType sbKeyType, keyPath string, signedIdentity PolicyReferenceMatch) (PolicyRequirement, error) {
+	return newPRSignedByKeyPath(keyType, keyPath, signedIdentity)
+}
+
+// newPRSignedByKeyData is NewPRSignedByKeyData, except it returns the private type.
+func newPRSignedByKeyData(keyType sbKeyType, keyData []byte, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) {
+	return newPRSignedBy(keyType, "", keyData, signedIdentity)
+}
+
+// NewPRSignedByKeyData returns a new "signedBy" PolicyRequirement using a KeyData
+func NewPRSignedByKeyData(keyType sbKeyType, keyData []byte, signedIdentity PolicyReferenceMatch) (PolicyRequirement, error) {
+	return newPRSignedByKeyData(keyType, keyData, signedIdentity)
+}
+
+// Compile-time check that prSignedBy implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prSignedBy)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
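+// A typical serialized form, as an editor's sketch (the key path is hypothetical):
+//
+//	{"type": "signedBy", "keyType": "GPGKeys",
+//	 "keyPath": "/etc/pki/containers/pubkey.gpg",
+//	 "signedIdentity": {"type": "matchRepoDigestOrExact"}}
+//
+// "signedIdentity" may be omitted; the parser below then defaults it to
+// matchRepoDigestOrExact.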
+func (pr *prSignedBy) UnmarshalJSON(data []byte) error {
+	*pr = prSignedBy{}
+	var tmp prSignedBy
+	var gotKeyPath, gotKeyData = false, false
+	var signedIdentity json.RawMessage
+	if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
+		switch key {
+		case "type":
+			return &tmp.Type
+		case "keyType":
+			return &tmp.KeyType
+		case "keyPath":
+			gotKeyPath = true
+			return &tmp.KeyPath
+		case "keyData":
+			gotKeyData = true
+			return &tmp.KeyData
+		case "signedIdentity":
+			return &signedIdentity
+		default:
+			return nil
+		}
+	}); err != nil {
+		return err
+	}
+
+	if tmp.Type != prTypeSignedBy {
+		return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+	}
+	if signedIdentity == nil {
+		tmp.SignedIdentity = NewPRMMatchRepoDigestOrExact()
+	} else {
+		si, err := newPolicyReferenceMatchFromJSON(signedIdentity)
+		if err != nil {
+			return err
+		}
+		tmp.SignedIdentity = si
+	}
+
+	var res *prSignedBy
+	var err error
+	switch {
+	case gotKeyPath && gotKeyData:
+		return InvalidPolicyFormatError("keyPath and keyData cannot be used simultaneously")
+	case gotKeyPath && !gotKeyData:
+		res, err = newPRSignedByKeyPath(tmp.KeyType, tmp.KeyPath, tmp.SignedIdentity)
+	case !gotKeyPath && gotKeyData:
+		res, err = newPRSignedByKeyData(tmp.KeyType, tmp.KeyData, tmp.SignedIdentity)
+	case !gotKeyPath && !gotKeyData:
+		return InvalidPolicyFormatError("At least one of keyPath and keyData must be specified")
+	default: // Coverage: This should never happen
+		return errors.Errorf("Impossible keyPath/keyData presence combination!?")
+	}
+	if err != nil {
+		return err
+	}
+	*pr = *res
+
+	return nil
+}
+
+// IsValid returns true iff kt is a recognized value
+func (kt sbKeyType) IsValid() bool {
+	switch kt {
+	case SBKeyTypeGPGKeys, SBKeyTypeSignedByGPGKeys,
+		SBKeyTypeX509Certificates, SBKeyTypeSignedByX509CAs:
+		return true
+	default:
+		return false
+	}
+}
+
+// Compile-time check that sbKeyType implements json.Unmarshaler.
+var _ json.Unmarshaler = (*sbKeyType)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (kt *sbKeyType) UnmarshalJSON(data []byte) error {
+	*kt = sbKeyType("")
+	var s string
+	if err := json.Unmarshal(data, &s); err != nil {
+		return err
+	}
+	if !sbKeyType(s).IsValid() {
+		return InvalidPolicyFormatError(fmt.Sprintf("Unrecognized keyType value \"%s\"", s))
+	}
+	*kt = sbKeyType(s)
+	return nil
+}
+
+// newPRSignedBaseLayer is NewPRSignedBaseLayer, except it returns the private type.
+func newPRSignedBaseLayer(baseLayerIdentity PolicyReferenceMatch) (*prSignedBaseLayer, error) {
+	if baseLayerIdentity == nil {
+		return nil, InvalidPolicyFormatError("baseLayerIdentity not specified")
+	}
+	return &prSignedBaseLayer{
+		prCommon:          prCommon{Type: prTypeSignedBaseLayer},
+		BaseLayerIdentity: baseLayerIdentity,
+	}, nil
+}
+
+// NewPRSignedBaseLayer returns a new "signedBaseLayer" PolicyRequirement.
+func NewPRSignedBaseLayer(baseLayerIdentity PolicyReferenceMatch) (PolicyRequirement, error) {
+	return newPRSignedBaseLayer(baseLayerIdentity)
+}
+
+// Compile-time check that prSignedBaseLayer implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prSignedBaseLayer)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
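+// Serialized form, as an editor's sketch (the repository is hypothetical):
+//
+//	{"type": "signedBaseLayer",
+//	 "baseLayerIdentity": {"type": "exactRepository",
+//	                       "dockerRepository": "registry.example.com/base/os"}}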
+func (pr *prSignedBaseLayer) UnmarshalJSON(data []byte) error {
+	*pr = prSignedBaseLayer{}
+	var tmp prSignedBaseLayer
+	var baseLayerIdentity json.RawMessage
+	if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+		"type":              &tmp.Type,
+		"baseLayerIdentity": &baseLayerIdentity,
+	}); err != nil {
+		return err
+	}
+
+	if tmp.Type != prTypeSignedBaseLayer {
+		return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+	}
+	bli, err := newPolicyReferenceMatchFromJSON(baseLayerIdentity)
+	if err != nil {
+		return err
+	}
+	res, err := newPRSignedBaseLayer(bli)
+	if err != nil {
+		// Coverage: This should never happen, newPolicyReferenceMatchFromJSON has ensured bli is valid.
+		return err
+	}
+	*pr = *res
+	return nil
+}
+
+// newPolicyReferenceMatchFromJSON parses JSON data into a PolicyReferenceMatch implementation.
+func newPolicyReferenceMatchFromJSON(data []byte) (PolicyReferenceMatch, error) {
+	var typeField prmCommon
+	if err := json.Unmarshal(data, &typeField); err != nil {
+		return nil, err
+	}
+	var res PolicyReferenceMatch
+	switch typeField.Type {
+	case prmTypeMatchExact:
+		res = &prmMatchExact{}
+	case prmTypeMatchRepoDigestOrExact:
+		res = &prmMatchRepoDigestOrExact{}
+	case prmTypeMatchRepository:
+		res = &prmMatchRepository{}
+	case prmTypeExactReference:
+		res = &prmExactReference{}
+	case prmTypeExactRepository:
+		res = &prmExactRepository{}
+	default:
+		return nil, InvalidPolicyFormatError(fmt.Sprintf("Unknown policy reference match type \"%s\"", typeField.Type))
+	}
+	if err := json.Unmarshal(data, &res); err != nil {
+		return nil, err
+	}
+	return res, nil
+}
+
+// newPRMMatchExact is NewPRMMatchExact, except it returns the private type.
+func newPRMMatchExact() *prmMatchExact {
+	return &prmMatchExact{prmCommon{Type: prmTypeMatchExact}}
+}
+
+// NewPRMMatchExact returns a new "matchExact" PolicyReferenceMatch.
+func NewPRMMatchExact() PolicyReferenceMatch {
+	return newPRMMatchExact()
+}
+
+// Compile-time check that prmMatchExact implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prmMatchExact)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (prm *prmMatchExact) UnmarshalJSON(data []byte) error {
+	*prm = prmMatchExact{}
+	var tmp prmMatchExact
+	if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+		"type": &tmp.Type,
+	}); err != nil {
+		return err
+	}
+
+	if tmp.Type != prmTypeMatchExact {
+		return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+	}
+	*prm = *newPRMMatchExact()
+	return nil
+}
+
+// newPRMMatchRepoDigestOrExact is NewPRMMatchRepoDigestOrExact, except it returns the private type.
+func newPRMMatchRepoDigestOrExact() *prmMatchRepoDigestOrExact {
+	return &prmMatchRepoDigestOrExact{prmCommon{Type: prmTypeMatchRepoDigestOrExact}}
+}
+
+// NewPRMMatchRepoDigestOrExact returns a new "matchRepoDigestOrExact" PolicyReferenceMatch.
+func NewPRMMatchRepoDigestOrExact() PolicyReferenceMatch {
+	return newPRMMatchRepoDigestOrExact()
+}
+
+// Compile-time check that prmMatchRepoDigestOrExact implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prmMatchRepoDigestOrExact)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
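+// Editor's illustration of the semantics (hypothetical references): a signature for
+// "example.com/ns/app:1.0" matches an image pulled as exactly that reference, and
+// also an image pulled as "example.com/ns/app@sha256:…" (same repository, digest form).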
+func (prm *prmMatchRepoDigestOrExact) UnmarshalJSON(data []byte) error {
+	*prm = prmMatchRepoDigestOrExact{}
+	var tmp prmMatchRepoDigestOrExact
+	if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+		"type": &tmp.Type,
+	}); err != nil {
+		return err
+	}
+
+	if tmp.Type != prmTypeMatchRepoDigestOrExact {
+		return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+	}
+	*prm = *newPRMMatchRepoDigestOrExact()
+	return nil
+}
+
+// newPRMMatchRepository is NewPRMMatchRepository, except it returns the private type.
+func newPRMMatchRepository() *prmMatchRepository {
+	return &prmMatchRepository{prmCommon{Type: prmTypeMatchRepository}}
+}
+
+// NewPRMMatchRepository returns a new "matchRepository" PolicyReferenceMatch.
+func NewPRMMatchRepository() PolicyReferenceMatch {
+	return newPRMMatchRepository()
+}
+
+// Compile-time check that prmMatchRepository implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prmMatchRepository)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (prm *prmMatchRepository) UnmarshalJSON(data []byte) error {
+	*prm = prmMatchRepository{}
+	var tmp prmMatchRepository
+	if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+		"type": &tmp.Type,
+	}); err != nil {
+		return err
+	}
+
+	if tmp.Type != prmTypeMatchRepository {
+		return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+	}
+	*prm = *newPRMMatchRepository()
+	return nil
+}
+
+// newPRMExactReference is NewPRMExactReference, except it returns the private type.
+func newPRMExactReference(dockerReference string) (*prmExactReference, error) {
+	ref, err := reference.ParseNormalizedNamed(dockerReference)
+	if err != nil {
+		return nil, InvalidPolicyFormatError(fmt.Sprintf("Invalid format of dockerReference %s: %s", dockerReference, err.Error()))
+	}
+	if reference.IsNameOnly(ref) {
+		return nil, InvalidPolicyFormatError(fmt.Sprintf("dockerReference %s contains neither a tag nor digest", dockerReference))
+	}
+	return &prmExactReference{
+		prmCommon:       prmCommon{Type: prmTypeExactReference},
+		DockerReference: dockerReference,
+	}, nil
+}
+
+// NewPRMExactReference returns a new "exactReference" PolicyReferenceMatch.
+func NewPRMExactReference(dockerReference string) (PolicyReferenceMatch, error) {
+	return newPRMExactReference(dockerReference)
+}
+
+// Compile-time check that prmExactReference implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prmExactReference)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (prm *prmExactReference) UnmarshalJSON(data []byte) error {
+	*prm = prmExactReference{}
+	var tmp prmExactReference
+	if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+		"type":            &tmp.Type,
+		"dockerReference": &tmp.DockerReference,
+	}); err != nil {
+		return err
+	}
+
+	if tmp.Type != prmTypeExactReference {
+		return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+	}
+
+	res, err := newPRMExactReference(tmp.DockerReference)
+	if err != nil {
+		return err
+	}
+	*prm = *res
+	return nil
+}
+
+// newPRMExactRepository is NewPRMExactRepository, except it returns the private type.
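+// Serialized form, as an editor's sketch (the repository is hypothetical):
+//
+//	{"type": "exactRepository", "dockerRepository": "registry.example.com/ns/app"}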
+func newPRMExactRepository(dockerRepository string) (*prmExactRepository, error) {
+	if _, err := reference.ParseNormalizedNamed(dockerRepository); err != nil {
+		return nil, InvalidPolicyFormatError(fmt.Sprintf("Invalid format of dockerRepository %s: %s", dockerRepository, err.Error()))
+	}
+	return &prmExactRepository{
+		prmCommon:        prmCommon{Type: prmTypeExactRepository},
+		DockerRepository: dockerRepository,
+	}, nil
+}
+
+// NewPRMExactRepository returns a new "exactRepository" PolicyReferenceMatch.
+func NewPRMExactRepository(dockerRepository string) (PolicyReferenceMatch, error) {
+	return newPRMExactRepository(dockerRepository)
+}
+
+// Compile-time check that prmExactRepository implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prmExactRepository)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (prm *prmExactRepository) UnmarshalJSON(data []byte) error {
+	*prm = prmExactRepository{}
+	var tmp prmExactRepository
+	if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+		"type":             &tmp.Type,
+		"dockerRepository": &tmp.DockerRepository,
+	}); err != nil {
+		return err
+	}
+
+	if tmp.Type != prmTypeExactRepository {
+		return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+	}
+
+	res, err := newPRMExactRepository(tmp.DockerRepository)
+	if err != nil {
+		return err
+	}
+	*prm = *res
+	return nil
+}
diff --git a/vendor/github.com/containers/image/signature/policy_eval.go b/vendor/github.com/containers/image/signature/policy_eval.go
new file mode 100644
index 0000000..b66ece4
--- /dev/null
+++ b/vendor/github.com/containers/image/signature/policy_eval.go
@@ -0,0 +1,289 @@
+// This defines the top-level policy evaluation API.
+// To the extent possible, the interface of the functions provided
+// here is intended to be completely unambiguous, and stable for users
+// to rely on.
+
+package signature
+
+import (
+	"context"
+
+	"github.com/containers/image/types"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+// PolicyRequirementError is an explanatory text for rejecting a signature or an image.
+type PolicyRequirementError string
+
+func (err PolicyRequirementError) Error() string {
+	return string(err)
+}
+
+// signatureAcceptanceResult is the principal value returned by isSignatureAuthorAccepted.
+type signatureAcceptanceResult string
+
+const (
+	sarAccepted signatureAcceptanceResult = "sarAccepted"
+	sarRejected signatureAcceptanceResult = "sarRejected"
+	sarUnknown  signatureAcceptanceResult = "sarUnknown"
+)
+
+// PolicyRequirement is a rule which must be satisfied by at least one of the signatures of an image.
+// The type is public, but its definition is private.
+type PolicyRequirement interface {
+	// FIXME: For speed, we should support creating per-context state (not stored in the PolicyRequirement), to cache
+	// costly initialization like creating temporary GPG home directories and reading files.
+	// Setup() (someState, error)
+	// Then, the operations below would be done on the someState object, not directly on a PolicyRequirement.
+
+	// isSignatureAuthorAccepted, given an image and a signature blob, returns:
+	// - sarAccepted if the signature has been verified against the appropriate public key
+	//   (where "appropriate public key" may depend on the contents of the signature);
+	//   in that case a parsed Signature should be returned.
+	// - sarRejected if the signature has not been verified;
+	//   in that case error must be non-nil, and should be a PolicyRequirementError if evaluation
+	//   succeeded but the result was rejection.
+	// - sarUnknown if this PolicyRequirement does not deal with signatures.
+	//   NOTE: sarUnknown should not be returned if this PolicyRequirement should make a decision but something failed.
+	//   Returning sarUnknown and a non-nil error value is invalid.
+	// WARNING: This makes the signature contents acceptable for further processing,
+	// but it does not necessarily mean that the contents of the signature are
+	// consistent with local policy.
+	// For example:
+	// - Do not use a true value to determine whether to run
+	//   a container based on this image; use IsRunningImageAllowed instead.
+	// - Just because a signature is accepted does not automatically mean the contents of the
+	//   signature are authorized to run code as root, or to affect system or cluster configuration.
+	isSignatureAuthorAccepted(ctx context.Context, image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error)
+
+	// isRunningImageAllowed returns true if the requirement allows running an image.
+	// If it returns false, err must be non-nil, and should be a PolicyRequirementError if evaluation
+	// succeeded but the result was rejection.
+	// WARNING: This validates signatures and the manifest, but does not download or validate the
+	// layers. Users must validate that the layers match their expected digests.
+	isRunningImageAllowed(ctx context.Context, image types.UnparsedImage) (bool, error)
+}
+
+// PolicyReferenceMatch specifies a set of image identities accepted in PolicyRequirement.
+// The type is public, but its implementation is private.
+type PolicyReferenceMatch interface {
+	// matchesDockerReference decides whether a specific image identity is accepted for an image
+	// (or, usually, for the image's Reference().DockerReference()). Note that
+	// image.Reference().DockerReference() may be nil.
+	matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool
+}
+
+// PolicyContext encapsulates a policy and possible cached state
+// for speeding up its evaluation.
+type PolicyContext struct {
+	Policy *Policy
+	state  policyContextState // Internal consistency checking
+}
+
+// policyContextState is used internally to verify the users are not misusing a PolicyContext.
+type policyContextState string
+
+const (
+	pcInvalid      policyContextState = ""
+	pcInitializing policyContextState = "Initializing"
+	pcReady        policyContextState = "Ready"
+	pcInUse        policyContextState = "InUse"
+	pcDestroying   policyContextState = "Destroying"
+	pcDestroyed    policyContextState = "Destroyed"
+)
+
+// changeState changes pc.state, or fails if the state is unexpected
+func (pc *PolicyContext) changeState(expected, new policyContextState) error {
+	if pc.state != expected {
+		return errors.Errorf(`Invalid PolicyContext state, expected "%s", found "%s"`, expected, pc.state)
+	}
+	pc.state = new
+	return nil
+}
+
+// NewPolicyContext sets up and initializes a context for the specified policy.
+// The policy must not be modified while the context exists. FIXME: make a deep copy?
+// If this function succeeds, the caller should call PolicyContext.Destroy() when done.
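+//
+// Typical usage from a client package (editor's sketch):
+//
+//	policy, err := signature.DefaultPolicy(nil)
+//	if err != nil { /* report to the user and abort */ }
+//	pc, err := signature.NewPolicyContext(policy)
+//	if err != nil { /* report to the user and abort */ }
+//	defer pc.Destroy()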
+func NewPolicyContext(policy *Policy) (*PolicyContext, error) {
+	pc := &PolicyContext{Policy: policy, state: pcInitializing}
+	// FIXME: initialize
+	if err := pc.changeState(pcInitializing, pcReady); err != nil {
+		// Huh?! This should never fail, we didn't give the pointer to anybody.
+		// Just give up and leave unclean state around.
+		return nil, err
+	}
+	return pc, nil
+}
+
+// Destroy should be called when the user of the context is done with it.
+func (pc *PolicyContext) Destroy() error {
+	if err := pc.changeState(pcReady, pcDestroying); err != nil {
+		return err
+	}
+	// FIXME: destroy
+	return pc.changeState(pcDestroying, pcDestroyed)
+}
+
+// policyIdentityLogName returns a string description of the image identity for policy purposes.
+// ONLY use this for log messages, not for any decisions!
+func policyIdentityLogName(ref types.ImageReference) string {
+	return ref.Transport().Name() + ":" + ref.PolicyConfigurationIdentity()
+}
+
+// requirementsForImageRef selects the appropriate requirements for ref.
+func (pc *PolicyContext) requirementsForImageRef(ref types.ImageReference) PolicyRequirements {
+	// Do we have a PolicyTransportScopes for this transport?
+	transportName := ref.Transport().Name()
+	if transportScopes, ok := pc.Policy.Transports[transportName]; ok {
+		// Look for a full match.
+		identity := ref.PolicyConfigurationIdentity()
+		if req, ok := transportScopes[identity]; ok {
+			logrus.Debugf(` Using transport "%s" policy section %s`, transportName, identity)
+			return req
+		}
+
+		// Look for a match of the possible parent namespaces.
+		for _, name := range ref.PolicyConfigurationNamespaces() {
+			if req, ok := transportScopes[name]; ok {
+				logrus.Debugf(` Using transport "%s" specific policy section %s`, transportName, name)
+				return req
+			}
+		}
+
+		// Look for a default match for the transport.
+		if req, ok := transportScopes[""]; ok {
+			logrus.Debugf(` Using transport "%s" policy section ""`, transportName)
+			return req
+		}
+	}
+
+	logrus.Debugf(" Using default policy section")
+	return pc.Policy.Default
+}
+
+// GetSignaturesWithAcceptedAuthor returns those signatures from an image
+// for which the policy accepts the author (and which have been successfully
+// verified).
+// NOTE: This may legitimately return an empty list and no error, if the image
+// has no signatures or only invalid signatures.
+// WARNING: This makes the signature contents acceptable for further processing,
+// but it does not necessarily mean that the contents of the signature are
+// consistent with local policy.
+// For example:
+// - Do not use the existence of an accepted signature to determine whether to run
+//   a container based on this image; use IsRunningImageAllowed instead.
+// - Just because a signature is accepted does not automatically mean the contents of the
+//   signature are authorized to run code as root, or to affect system or cluster configuration.
+func (pc *PolicyContext) GetSignaturesWithAcceptedAuthor(ctx context.Context, image types.UnparsedImage) (sigs []*Signature, finalErr error) {
+	if err := pc.changeState(pcReady, pcInUse); err != nil {
+		return nil, err
+	}
+	defer func() {
+		if err := pc.changeState(pcInUse, pcReady); err != nil {
+			sigs = nil
+			finalErr = err
+		}
+	}()
+
+	logrus.Debugf("GetSignaturesWithAcceptedAuthor for image %s", policyIdentityLogName(image.Reference()))
+	reqs := pc.requirementsForImageRef(image.Reference())
+
+	// FIXME: rename Signatures to UnverifiedSignatures
+	unverifiedSignatures, err := image.Signatures(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	res := make([]*Signature, 0, len(unverifiedSignatures))
+	for sigNumber, sig := range unverifiedSignatures {
+		var acceptedSig *Signature // non-nil if accepted
+		rejected := false
+		// FIXME? Say more about the contents of the signature, i.e. parse it even before verification?!
+		logrus.Debugf("Evaluating signature %d:", sigNumber)
+	interpretingReqs:
+		for reqNumber, req := range reqs {
+			// FIXME: Log the requirement itself? For now, we use just the number.
+			// FIXME: supply state
+			switch res, as, err := req.isSignatureAuthorAccepted(ctx, image, sig); res {
+			case sarAccepted:
+				if as == nil { // Coverage: this should never happen
+					logrus.Debugf(" Requirement %d: internal inconsistency: sarAccepted but no parsed contents", reqNumber)
+					rejected = true
+					break interpretingReqs
+				}
+				logrus.Debugf(" Requirement %d: signature accepted", reqNumber)
+				if acceptedSig == nil {
+					acceptedSig = as
+				} else if *as != *acceptedSig { // Coverage: this should never happen
+					// Huh?! Two ways of verifying the same signature blob resulted in two different parses of its already accepted contents?
+					logrus.Debugf(" Requirement %d: internal inconsistency: sarAccepted but different parsed contents", reqNumber)
+					rejected = true
+					acceptedSig = nil
+					break interpretingReqs
+				}
+			case sarRejected:
+				logrus.Debugf(" Requirement %d: signature rejected: %s", reqNumber, err.Error())
+				rejected = true
+				break interpretingReqs
+			case sarUnknown:
+				if err != nil { // Coverage: this should never happen
+					logrus.Debugf(" Requirement %d: internal inconsistency: sarUnknown but an error message %s", reqNumber, err.Error())
+					rejected = true
+					break interpretingReqs
+				}
+				logrus.Debugf(" Requirement %d: signature state unknown, continuing", reqNumber)
+			default: // Coverage: this should never happen
+				logrus.Debugf(" Requirement %d: internal inconsistency: unknown result %#v", reqNumber, string(res))
+				rejected = true
+				break interpretingReqs
+			}
+		}
+		// This also handles the (invalid) case of empty reqs, by rejecting the signature.
+		if acceptedSig != nil && !rejected {
+			logrus.Debugf(" Overall: OK, signature accepted")
+			res = append(res, acceptedSig)
+		} else {
+			logrus.Debugf(" Overall: Signature not accepted")
+		}
+	}
+	return res, nil
+}
+
+// IsRunningImageAllowed returns true iff the policy allows running the image.
+// If it returns false, err must be non-nil, and should be a PolicyRequirementError if evaluation
+// succeeded but the result was rejection.
+// WARNING: This validates signatures and the manifest, but does not download or validate the
+// layers. Users must validate that the layers match their expected digests.
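+//
+// Editor's sketch of the intended call pattern:
+//
+//	allowed, err := pc.IsRunningImageAllowed(ctx, unparsedImage)
+//	if !allowed { /* refuse to use the image; err explains the rejection */ }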
+func (pc *PolicyContext) IsRunningImageAllowed(ctx context.Context, image types.UnparsedImage) (res bool, finalErr error) { + if err := pc.changeState(pcReady, pcInUse); err != nil { + return false, err + } + defer func() { + if err := pc.changeState(pcInUse, pcReady); err != nil { + res = false + finalErr = err + } + }() + + logrus.Debugf("IsRunningImageAllowed for image %s", policyIdentityLogName(image.Reference())) + reqs := pc.requirementsForImageRef(image.Reference()) + + if len(reqs) == 0 { + return false, PolicyRequirementError("List of verification policy requirements must not be empty") + } + + for reqNumber, req := range reqs { + // FIXME: supply state + allowed, err := req.isRunningImageAllowed(ctx, image) + if !allowed { + logrus.Debugf("Requirement %d: denied, done", reqNumber) + return false, err + } + logrus.Debugf(" Requirement %d: allowed", reqNumber) + } + // We have tested that len(reqs) != 0, so at least one req must have explicitly allowed this image. + logrus.Debugf("Overall: allowed") + return true, nil +} diff --git a/vendor/github.com/containers/image/signature/policy_eval_baselayer.go b/vendor/github.com/containers/image/signature/policy_eval_baselayer.go new file mode 100644 index 0000000..54c6dc1 --- /dev/null +++ b/vendor/github.com/containers/image/signature/policy_eval_baselayer.go @@ -0,0 +1,20 @@ +// Policy evaluation for prSignedBaseLayer. + +package signature + +import ( + "context" + + "github.com/containers/image/types" + "github.com/sirupsen/logrus" +) + +func (pr *prSignedBaseLayer) isSignatureAuthorAccepted(ctx context.Context, image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) { + return sarUnknown, nil, nil +} + +func (pr *prSignedBaseLayer) isRunningImageAllowed(ctx context.Context, image types.UnparsedImage) (bool, error) { + // FIXME? Reject this at policy parsing time already? + logrus.Errorf("signedBaseLayer not implemented yet!") + return false, PolicyRequirementError("signedBaseLayer not implemented yet!") +} diff --git a/vendor/github.com/containers/image/signature/policy_eval_signedby.go b/vendor/github.com/containers/image/signature/policy_eval_signedby.go new file mode 100644 index 0000000..d59ffa1 --- /dev/null +++ b/vendor/github.com/containers/image/signature/policy_eval_signedby.go @@ -0,0 +1,131 @@ +// Policy evaluation for prSignedBy. + +package signature + +import ( + "context" + "fmt" + "io/ioutil" + "strings" + + "github.com/pkg/errors" + + "github.com/containers/image/manifest" + "github.com/containers/image/types" + "github.com/opencontainers/go-digest" +) + +func (pr *prSignedBy) isSignatureAuthorAccepted(ctx context.Context, image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) { + switch pr.KeyType { + case SBKeyTypeGPGKeys: + case SBKeyTypeSignedByGPGKeys, SBKeyTypeX509Certificates, SBKeyTypeSignedByX509CAs: + // FIXME? Reject this at policy parsing time already? 
+		return sarRejected, nil, errors.Errorf(`Unimplemented "keyType" value "%s"`, string(pr.KeyType))
+	default:
+		// This should never happen, newPRSignedBy ensures KeyType.IsValid()
+		return sarRejected, nil, errors.Errorf(`Unknown "keyType" value "%s"`, string(pr.KeyType))
+	}
+
+	if pr.KeyPath != "" && pr.KeyData != nil {
+		return sarRejected, nil, errors.New(`Internal inconsistency: both "keyPath" and "keyData" specified`)
+	}
+	// FIXME: move this to per-context initialization
+	var data []byte
+	if pr.KeyData != nil {
+		data = pr.KeyData
+	} else {
+		d, err := ioutil.ReadFile(pr.KeyPath)
+		if err != nil {
+			return sarRejected, nil, err
+		}
+		data = d
+	}
+
+	// FIXME: move this to per-context initialization
+	mech, trustedIdentities, err := NewEphemeralGPGSigningMechanism(data)
+	if err != nil {
+		return sarRejected, nil, err
+	}
+	defer mech.Close()
+	if len(trustedIdentities) == 0 {
+		return sarRejected, nil, PolicyRequirementError("No public keys imported")
+	}
+
+	signature, err := verifyAndExtractSignature(mech, sig, signatureAcceptanceRules{
+		validateKeyIdentity: func(keyIdentity string) error {
+			for _, trustedIdentity := range trustedIdentities {
+				if keyIdentity == trustedIdentity {
+					return nil
+				}
+			}
+			// Coverage: We use a private GPG home directory and only import trusted keys, so this should
+			// not be reachable.
+			return PolicyRequirementError(fmt.Sprintf("Signature by key %s is not accepted", keyIdentity))
+		},
+		validateSignedDockerReference: func(ref string) error {
+			if !pr.SignedIdentity.matchesDockerReference(image, ref) {
+				return PolicyRequirementError(fmt.Sprintf("Signature for identity %s is not accepted", ref))
+			}
+			return nil
+		},
+		validateSignedDockerManifestDigest: func(digest digest.Digest) error {
+			m, _, err := image.Manifest(ctx)
+			if err != nil {
+				return err
+			}
+			digestMatches, err := manifest.MatchesDigest(m, digest)
+			if err != nil {
+				return err
+			}
+			if !digestMatches {
+				return PolicyRequirementError(fmt.Sprintf("Signature for digest %s does not match", digest))
+			}
+			return nil
+		},
+	})
+	if err != nil {
+		return sarRejected, nil, err
+	}
+
+	return sarAccepted, signature, nil
+}
+
+func (pr *prSignedBy) isRunningImageAllowed(ctx context.Context, image types.UnparsedImage) (bool, error) {
+	sigs, err := image.Signatures(ctx)
+	if err != nil {
+		return false, err
+	}
+	var rejections []error
+	for _, s := range sigs {
+		var reason error
+		switch res, _, err := pr.isSignatureAuthorAccepted(ctx, image, s); res {
+		case sarAccepted:
+			// One accepted signature is enough.
+			return true, nil
+		case sarRejected:
+			reason = err
+		case sarUnknown:
+			// Huh?! This should not happen at all; treat it as any other invalid value.
+ fallthrough + default: + reason = errors.Errorf(`Internal error: Unexpected signature verification result "%s"`, string(res)) + } + rejections = append(rejections, reason) + } + var summary error + switch len(rejections) { + case 0: + summary = PolicyRequirementError("A signature was required, but no signature exists") + case 1: + summary = rejections[0] + default: + var msgs []string + for _, e := range rejections { + msgs = append(msgs, e.Error()) + } + summary = PolicyRequirementError(fmt.Sprintf("None of the signatures were accepted, reasons: %s", + strings.Join(msgs, "; "))) + } + return false, summary +} diff --git a/vendor/github.com/containers/image/signature/policy_eval_simple.go b/vendor/github.com/containers/image/signature/policy_eval_simple.go new file mode 100644 index 0000000..b0f2fff --- /dev/null +++ b/vendor/github.com/containers/image/signature/policy_eval_simple.go @@ -0,0 +1,29 @@ +// Policy evaluation for the various simple PolicyRequirement types. + +package signature + +import ( + "context" + "fmt" + + "github.com/containers/image/transports" + "github.com/containers/image/types" +) + +func (pr *prInsecureAcceptAnything) isSignatureAuthorAccepted(ctx context.Context, image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) { + // prInsecureAcceptAnything semantics: Every image is allowed to run, + // but this does not consider the signature as verified. + return sarUnknown, nil, nil +} + +func (pr *prInsecureAcceptAnything) isRunningImageAllowed(ctx context.Context, image types.UnparsedImage) (bool, error) { + return true, nil +} + +func (pr *prReject) isSignatureAuthorAccepted(ctx context.Context, image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) { + return sarRejected, nil, PolicyRequirementError(fmt.Sprintf("Any signatures for image %s are rejected by policy.", transports.ImageName(image.Reference()))) +} + +func (pr *prReject) isRunningImageAllowed(ctx context.Context, image types.UnparsedImage) (bool, error) { + return false, PolicyRequirementError(fmt.Sprintf("Running image %s is rejected by policy.", transports.ImageName(image.Reference()))) +} diff --git a/vendor/github.com/containers/image/signature/policy_reference_match.go b/vendor/github.com/containers/image/signature/policy_reference_match.go new file mode 100644 index 0000000..a8dad67 --- /dev/null +++ b/vendor/github.com/containers/image/signature/policy_reference_match.go @@ -0,0 +1,101 @@ +// PolicyReferenceMatch implementations. + +package signature + +import ( + "fmt" + + "github.com/containers/image/docker/reference" + "github.com/containers/image/transports" + "github.com/containers/image/types" +) + +// parseImageAndDockerReference converts an image and a reference string into two parsed entities, failing on any error and handling unidentified images. 
+func parseImageAndDockerReference(image types.UnparsedImage, s2 string) (reference.Named, reference.Named, error) {
+	r1 := image.Reference().DockerReference()
+	if r1 == nil {
+		return nil, nil, PolicyRequirementError(fmt.Sprintf("Docker reference match attempted on image %s with no known Docker reference identity",
+			transports.ImageName(image.Reference())))
+	}
+	r2, err := reference.ParseNormalizedNamed(s2)
+	if err != nil {
+		return nil, nil, err
+	}
+	return r1, r2, nil
+}
+
+func (prm *prmMatchExact) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
+	intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference)
+	if err != nil {
+		return false
+	}
+	// Do not add default tags: image.Reference().DockerReference() should contain it already, and signatureDockerReference should be exact; so, verify that now.
+	if reference.IsNameOnly(intended) || reference.IsNameOnly(signature) {
+		return false
+	}
+	return signature.String() == intended.String()
+}
+
+func (prm *prmMatchRepoDigestOrExact) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
+	intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference)
+	if err != nil {
+		return false
+	}
+
+	// Do not add default tags: image.Reference().DockerReference() should contain it already, and signatureDockerReference should be exact; so, verify that now.
+	if reference.IsNameOnly(signature) {
+		return false
+	}
+	switch intended.(type) {
+	case reference.NamedTagged: // Includes the case when intended has both a tag and a digest.
+		return signature.String() == intended.String()
+	case reference.Canonical:
+		// We don’t actually compare the manifest digest against the signature here; that happens in prSignedBy, via UnparsedImage.Manifest.
+		// Because UnparsedImage.Manifest verifies the intended.Digest() against the manifest, and prSignedBy verifies the signature digest against the manifest,
+		// we know that signature digest matches intended.Digest() (but intended.Digest() and signature digest may use different algorithms)
+		return signature.Name() == intended.Name()
+	default: // !reference.IsNameOnly(intended)
+		return false
+	}
+}
+
+func (prm *prmMatchRepository) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
+	intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference)
+	if err != nil {
+		return false
+	}
+	return signature.Name() == intended.Name()
+}
+
+// parseDockerReferences converts two reference strings into parsed entities, failing on any error
+func parseDockerReferences(s1, s2 string) (reference.Named, reference.Named, error) {
+	r1, err := reference.ParseNormalizedNamed(s1)
+	if err != nil {
+		return nil, nil, err
+	}
+	r2, err := reference.ParseNormalizedNamed(s2)
+	if err != nil {
+		return nil, nil, err
+	}
+	return r1, r2, nil
+}
+
+func (prm *prmExactReference) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
+	intended, signature, err := parseDockerReferences(prm.DockerReference, signatureDockerReference)
+	if err != nil {
+		return false
+	}
+	// prm.DockerReference and signatureDockerReference should be exact; so, verify that now.
+	if reference.IsNameOnly(intended) || reference.IsNameOnly(signature) {
+		return false
+	}
+	return signature.String() == intended.String()
+}
+
+func (prm *prmExactRepository) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
+	intended, signature, err := parseDockerReferences(prm.DockerRepository, signatureDockerReference)
+	if err != nil {
+		return false
+	}
+	return signature.Name() == intended.Name()
+}
diff --git a/vendor/github.com/containers/image/signature/policy_types.go b/vendor/github.com/containers/image/signature/policy_types.go
new file mode 100644
index 0000000..4cd770f
--- /dev/null
+++ b/vendor/github.com/containers/image/signature/policy_types.go
@@ -0,0 +1,152 @@
+// Note: Consider the API unstable until the code supports at least three different image formats or transports.
+
+// This defines types used to represent a signature verification policy in memory.
+// Do not use the private types directly; either parse a configuration file, or construct a Policy from PolicyRequirements
+// built using the constructor functions provided in policy_config.go.
+
+package signature
+
+// NOTE: Keep this in sync with docs/policy.json.md!
+
+// Policy defines requirements for considering a signature, or an image, valid.
+type Policy struct {
+	// Default applies to any image which does not have a matching policy in Transports.
+	// Note that this can happen even if a matching PolicyTransportScopes exists in Transports
+	// if the image matches none of the scopes.
+	Default    PolicyRequirements               `json:"default"`
+	Transports map[string]PolicyTransportScopes `json:"transports"`
+}
+
+// PolicyTransportScopes defines policies for images for a specific transport,
+// for various scopes, the map keys.
+// Scopes are defined by the transport (types.ImageReference.PolicyConfigurationIdentity etc.);
+// there is one scope precisely matching to a single image, and namespace scopes as prefixes
+// of the single-image scope. (e.g. hostname[/zero[/or[/more[/namespaces[/individualimage]]]]])
+// The empty scope, if it exists, is considered a parent namespace of all other scopes.
+// Most specific scope wins, duplication is prohibited (hard failure).
+type PolicyTransportScopes map[string]PolicyRequirements
+
+// PolicyRequirements is a set of requirements applying to a set of images; each of them must be satisfied (though perhaps each by a different signature).
+// Must not be empty, frequently will only contain a single element.
+type PolicyRequirements []PolicyRequirement
+
+// PolicyRequirement is a rule which must be satisfied by at least one of the signatures of an image.
+// The type is public, but its definition is private.
+
+// prCommon is the common type field in a JSON encoding of PolicyRequirement.
+type prCommon struct {
+	Type prTypeIdentifier `json:"type"`
+}
+
+// prTypeIdentifier is a string designating a kind of a PolicyRequirement.
+type prTypeIdentifier string
+
+const (
+	prTypeInsecureAcceptAnything prTypeIdentifier = "insecureAcceptAnything"
+	prTypeReject                 prTypeIdentifier = "reject"
+	prTypeSignedBy               prTypeIdentifier = "signedBy"
+	prTypeSignedBaseLayer        prTypeIdentifier = "signedBaseLayer"
+)
+
+// prInsecureAcceptAnything is a PolicyRequirement with type = prTypeInsecureAcceptAnything:
+// every image is allowed to run.
+// Note that because PolicyRequirements are implicitly ANDed, this is necessary only if it is the only rule (to make the list non-empty and the policy explicit).
+// NOTE: This allows the image to run; it DOES NOT consider the signature verified (per IsSignatureAuthorAccepted).
+// FIXME? Better name?
+type prInsecureAcceptAnything struct {
+	prCommon
+}
+
+// prReject is a PolicyRequirement with type = prTypeReject: every image is rejected.
+type prReject struct {
+	prCommon
+}
+
+// prSignedBy is a PolicyRequirement with type = prTypeSignedBy: the image is signed by trusted keys for a specified identity
+type prSignedBy struct {
+	prCommon
+
+	// KeyType specifies what kind of key reference KeyPath/KeyData is.
+	// Acceptable values are “GPGKeys” | “signedByGPGKeys” | “X.509Certificates” | “signedByX.509CAs”
+	// FIXME: eventually also support GPGTOFU, X.509TOFU, with KeyPath only
+	KeyType sbKeyType `json:"keyType"`
+
+	// KeyPath is a pathname to a local file containing the trusted key(s). Exactly one of KeyPath and KeyData must be specified.
+	KeyPath string `json:"keyPath,omitempty"`
+	// KeyData contains the trusted key(s), base64-encoded. Exactly one of KeyPath and KeyData must be specified.
+	KeyData []byte `json:"keyData,omitempty"`
+
+	// SignedIdentity specifies what image identity the signature must be claiming about the image.
+	// Defaults to "matchRepoDigestOrExact" if not specified.
+	SignedIdentity PolicyReferenceMatch `json:"signedIdentity"`
+}
+
+// sbKeyType are the allowed values for prSignedBy.KeyType
+type sbKeyType string
+
+const (
+	// SBKeyTypeGPGKeys refers to keys contained in a GPG keyring
+	SBKeyTypeGPGKeys sbKeyType = "GPGKeys"
+	// SBKeyTypeSignedByGPGKeys refers to keys signed by keys in a GPG keyring
+	SBKeyTypeSignedByGPGKeys sbKeyType = "signedByGPGKeys"
+	// SBKeyTypeX509Certificates refers to keys in a set of X.509 certificates
+	// FIXME: PEM, DER?
+	SBKeyTypeX509Certificates sbKeyType = "X509Certificates"
+	// SBKeyTypeSignedByX509CAs refers to keys signed by one of the X.509 CAs
+	// FIXME: PEM, DER?
+	SBKeyTypeSignedByX509CAs sbKeyType = "signedByX509CAs"
+)
+
+// prSignedBaseLayer is a PolicyRequirement with type = prSignedBaseLayer: the image has a specified, correctly signed, base image.
+type prSignedBaseLayer struct {
+	prCommon
+	// BaseLayerIdentity specifies the base image to look for. "match-exact" is rejected, "match-repository" is unlikely to be useful.
+	BaseLayerIdentity PolicyReferenceMatch `json:"baseLayerIdentity"`
+}
+
+// PolicyReferenceMatch specifies a set of image identities accepted in PolicyRequirement.
+// The type is public, but its implementation is private.
+
+// prmCommon is the common type field in a JSON encoding of PolicyReferenceMatch.
+type prmCommon struct {
+	Type prmTypeIdentifier `json:"type"`
+}
+
+// prmTypeIdentifier is a string designating a kind of a PolicyReferenceMatch.
+type prmTypeIdentifier string
+
+const (
+	prmTypeMatchExact             prmTypeIdentifier = "matchExact"
+	prmTypeMatchRepoDigestOrExact prmTypeIdentifier = "matchRepoDigestOrExact"
+	prmTypeMatchRepository        prmTypeIdentifier = "matchRepository"
+	prmTypeExactReference         prmTypeIdentifier = "exactReference"
+	prmTypeExactRepository        prmTypeIdentifier = "exactRepository"
+)
+
+// prmMatchExact is a PolicyReferenceMatch with type = prmMatchExact: the two references must match exactly.
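+// Editor's illustration (hypothetical reference): a signature claiming
+// "docker.io/library/busybox:1.30" matches only an image whose reference normalizes
+// to exactly that; "busybox:latest" does not match.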
+type prmMatchExact struct {
+	prmCommon
+}
+
+// prmMatchRepoDigestOrExact is a PolicyReferenceMatch with type = prmMatchRepoDigestOrExact: the two references must match exactly,
+// except that digest references are also accepted if the repository name matches (regardless of tag/digest) and the signature applies to the referenced digest
+type prmMatchRepoDigestOrExact struct {
+	prmCommon
+}
+
+// prmMatchRepository is a PolicyReferenceMatch with type = prmMatchRepository: the two references must use the same repository, may differ in the tag.
+type prmMatchRepository struct {
+	prmCommon
+}
+
+// prmExactReference is a PolicyReferenceMatch with type = prmExactReference: matches a specified reference exactly.
+type prmExactReference struct {
+	prmCommon
+	DockerReference string `json:"dockerReference"`
+}
+
+// prmExactRepository is a PolicyReferenceMatch with type = prmExactRepository: matches a specified repository, with any tag.
+type prmExactRepository struct {
+	prmCommon
+	DockerRepository string `json:"dockerRepository"`
+}
diff --git a/vendor/github.com/containers/image/signature/signature.go b/vendor/github.com/containers/image/signature/signature.go
new file mode 100644
index 0000000..41f13f7
--- /dev/null
+++ b/vendor/github.com/containers/image/signature/signature.go
@@ -0,0 +1,280 @@
+// Note: Consider the API unstable until the code supports at least three different image formats or transports.
+
+// NOTE: Keep this in sync with docs/atomic-signature.md and docs/atomic-signature-embedded.json!
+
+package signature
+
+import (
+	"encoding/json"
+	"fmt"
+	"time"
+
+	"github.com/pkg/errors"
+
+	"github.com/containers/image/version"
+	"github.com/opencontainers/go-digest"
+)
+
+const (
+	signatureType = "atomic container signature"
+)
+
+// InvalidSignatureError is returned when parsing an invalid signature.
+type InvalidSignatureError struct {
+	msg string
+}
+
+func (err InvalidSignatureError) Error() string {
+	return err.msg
+}
+
+// Signature is a parsed content of a signature.
+// The only way to get this structure from a blob should be as a return value from a successful call to verifyAndExtractSignature below.
+type Signature struct {
+	DockerManifestDigest digest.Digest
+	DockerReference      string // FIXME: more precise type?
+}
+
+// untrustedSignature is a parsed content of a signature.
+type untrustedSignature struct {
+	UntrustedDockerManifestDigest digest.Digest
+	UntrustedDockerReference      string // FIXME: more precise type?
+	UntrustedCreatorID            *string
+	// This is intentionally an int64; the native JSON float64 type would allow to represent _some_ sub-second precision,
+	// but not nearly enough (with current timestamp values, a single unit in the last place is on the order of hundreds of nanoseconds).
+	// So, this is explicitly an int64, and we reject fractional values. If we did need more precise timestamps eventually,
+	// we would add another field, UntrustedTimestampNS int64.
+	UntrustedTimestamp *int64
+}
+
+// UntrustedSignatureInformation is information available in an untrusted signature.
+// This may be useful when debugging signature verification failures,
+// or when managing a set of signatures on a single image.
+//
+// WARNING: Do not use the contents of this for ANY security decisions,
+// and be VERY CAREFUL about showing this information to humans in any way which suggests that these values “are probably” reliable.
+// There is NO REASON to expect the values to be correct, or not intentionally misleading +// (including things like “✅ Verified by $authority”) +type UntrustedSignatureInformation struct { + UntrustedDockerManifestDigest digest.Digest + UntrustedDockerReference string // FIXME: more precise type? + UntrustedCreatorID *string + UntrustedTimestamp *time.Time + UntrustedShortKeyIdentifier string +} + +// newUntrustedSignature returns an untrustedSignature object with +// the specified primary contents and appropriate metadata. +func newUntrustedSignature(dockerManifestDigest digest.Digest, dockerReference string) untrustedSignature { + // Use intermediate variables for these values so that we can take their addresses. + // Golang guarantees that they will have a new address on every execution. + creatorID := "atomic " + version.Version + timestamp := time.Now().Unix() + return untrustedSignature{ + UntrustedDockerManifestDigest: dockerManifestDigest, + UntrustedDockerReference: dockerReference, + UntrustedCreatorID: &creatorID, + UntrustedTimestamp: ×tamp, + } +} + +// Compile-time check that untrustedSignature implements json.Marshaler +var _ json.Marshaler = (*untrustedSignature)(nil) + +// MarshalJSON implements the json.Marshaler interface. +func (s untrustedSignature) MarshalJSON() ([]byte, error) { + if s.UntrustedDockerManifestDigest == "" || s.UntrustedDockerReference == "" { + return nil, errors.New("Unexpected empty signature content") + } + critical := map[string]interface{}{ + "type": signatureType, + "image": map[string]string{"docker-manifest-digest": s.UntrustedDockerManifestDigest.String()}, + "identity": map[string]string{"docker-reference": s.UntrustedDockerReference}, + } + optional := map[string]interface{}{} + if s.UntrustedCreatorID != nil { + optional["creator"] = *s.UntrustedCreatorID + } + if s.UntrustedTimestamp != nil { + optional["timestamp"] = *s.UntrustedTimestamp + } + signature := map[string]interface{}{ + "critical": critical, + "optional": optional, + } + return json.Marshal(signature) +} + +// Compile-time check that untrustedSignature implements json.Unmarshaler +var _ json.Unmarshaler = (*untrustedSignature)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface +func (s *untrustedSignature) UnmarshalJSON(data []byte) error { + err := s.strictUnmarshalJSON(data) + if err != nil { + if _, ok := err.(jsonFormatError); ok { + err = InvalidSignatureError{msg: err.Error()} + } + } + return err +} + +// strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal jsonFormatError error type. +// Splitting it into a separate function allows us to do the jsonFormatError → InvalidSignatureError in a single place, the caller. 
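+// The payload shape this expects mirrors MarshalJSON above (editor's sketch with
+// hypothetical values):
+//
+//	{"critical": {"type": "atomic container signature",
+//	              "image": {"docker-manifest-digest": "sha256:…"},
+//	              "identity": {"docker-reference": "docker.io/library/busybox:latest"}},
+//	 "optional": {"creator": "atomic 0.1", "timestamp": 1546300800}}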
+func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error {
+	var critical, optional json.RawMessage
+	if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+		"critical": &critical,
+		"optional": &optional,
+	}); err != nil {
+		return err
+	}
+
+	var creatorID string
+	var timestamp float64
+	var gotCreatorID, gotTimestamp = false, false
+	if err := paranoidUnmarshalJSONObject(optional, func(key string) interface{} {
+		switch key {
+		case "creator":
+			gotCreatorID = true
+			return &creatorID
+		case "timestamp":
+			gotTimestamp = true
+			return &timestamp
+		default:
+			var ignore interface{}
+			return &ignore
+		}
+	}); err != nil {
+		return err
+	}
+	if gotCreatorID {
+		s.UntrustedCreatorID = &creatorID
+	}
+	if gotTimestamp {
+		intTimestamp := int64(timestamp)
+		if float64(intTimestamp) != timestamp {
+			return InvalidSignatureError{msg: "Field optional.timestamp is not an integer"}
+		}
+		s.UntrustedTimestamp = &intTimestamp
+	}
+
+	var t string
+	var image, identity json.RawMessage
+	if err := paranoidUnmarshalJSONObjectExactFields(critical, map[string]interface{}{
+		"type":     &t,
+		"image":    &image,
+		"identity": &identity,
+	}); err != nil {
+		return err
+	}
+	if t != signatureType {
+		return InvalidSignatureError{msg: fmt.Sprintf("Unrecognized signature type %s", t)}
+	}
+
+	var digestString string
+	if err := paranoidUnmarshalJSONObjectExactFields(image, map[string]interface{}{
+		"docker-manifest-digest": &digestString,
+	}); err != nil {
+		return err
+	}
+	s.UntrustedDockerManifestDigest = digest.Digest(digestString)
+
+	return paranoidUnmarshalJSONObjectExactFields(identity, map[string]interface{}{
+		"docker-reference": &s.UntrustedDockerReference,
+	})
+}
+
+// Sign formats the signature and returns a blob signed using mech and keyIdentity
+// (If it seems surprising that this is a method on untrustedSignature, note that there
+// isn’t a good reason to think that a key used by the user is trusted by any component
+// of the system just because it is a private key — actually the presence of a private key
+// on the system increases the likelihood of a successful attack on that private key
+// on that particular system.)
+func (s untrustedSignature) sign(mech SigningMechanism, keyIdentity string) ([]byte, error) {
+	json, err := json.Marshal(s)
+	if err != nil {
+		return nil, err
+	}
+
+	return mech.Sign(json, keyIdentity)
+}
+
+// signatureAcceptanceRules specifies how to decide whether an untrusted signature is acceptable.
+// We centralize the actual parsing and data extraction in verifyAndExtractSignature; this supplies
+// the policy. We use an object instead of supplying func parameters to verifyAndExtractSignature
+// because the functions have the same or similar types, so there is a risk of exchanging the functions;
+// named members of this struct are more explicit.
+type signatureAcceptanceRules struct {
+	validateKeyIdentity                func(string) error
+	validateSignedDockerReference      func(string) error
+	validateSignedDockerManifestDigest func(digest.Digest) error
+}
+
+// verifyAndExtractSignature verifies that unverifiedSignature has been signed, and that its principal components
+// match expected values, both as specified by rules, and returns it
+func verifyAndExtractSignature(mech SigningMechanism, unverifiedSignature []byte, rules signatureAcceptanceRules) (*Signature, error) {
+	signed, keyIdentity, err := mech.Verify(unverifiedSignature)
+	if err != nil {
+		return nil, err
+	}
+	if err := rules.validateKeyIdentity(keyIdentity); err != nil {
+		return nil, err
+	}
+
+	var unmatchedSignature untrustedSignature
+	if err := json.Unmarshal(signed, &unmatchedSignature); err != nil {
+		return nil, InvalidSignatureError{msg: err.Error()}
+	}
+	if err := rules.validateSignedDockerManifestDigest(unmatchedSignature.UntrustedDockerManifestDigest); err != nil {
+		return nil, err
+	}
+	if err := rules.validateSignedDockerReference(unmatchedSignature.UntrustedDockerReference); err != nil {
+		return nil, err
+	}
+	// signatureAcceptanceRules have accepted this value.
+	return &Signature{
+		DockerManifestDigest: unmatchedSignature.UntrustedDockerManifestDigest,
+		DockerReference:      unmatchedSignature.UntrustedDockerReference,
+	}, nil
+}
+
+// GetUntrustedSignatureInformationWithoutVerifying extracts information available in an untrusted signature,
+// WITHOUT doing any cryptographic verification.
+// This may be useful when debugging signature verification failures,
+// or when managing a set of signatures on a single image.
+//
+// WARNING: Do not use the contents of this for ANY security decisions,
+// and be VERY CAREFUL about showing this information to humans in any way which suggests that these values “are probably” reliable.
+// There is NO REASON to expect the values to be correct, or not intentionally misleading
+// (including things like “✅ Verified by $authority”)
+func GetUntrustedSignatureInformationWithoutVerifying(untrustedSignatureBytes []byte) (*UntrustedSignatureInformation, error) {
+	// NOTE: This should eventually do format autodetection.
+	mech, _, err := NewEphemeralGPGSigningMechanism([]byte{})
+	if err != nil {
+		return nil, err
+	}
+	defer mech.Close()
+
+	untrustedContents, shortKeyIdentifier, err := mech.UntrustedSignatureContents(untrustedSignatureBytes)
+	if err != nil {
+		return nil, err
+	}
+	var untrustedDecodedContents untrustedSignature
+	if err := json.Unmarshal(untrustedContents, &untrustedDecodedContents); err != nil {
+		return nil, InvalidSignatureError{msg: err.Error()}
+	}
+
+	var timestamp *time.Time // = nil
+	if untrustedDecodedContents.UntrustedTimestamp != nil {
+		ts := time.Unix(*untrustedDecodedContents.UntrustedTimestamp, 0)
+		timestamp = &ts
+	}
+	return &UntrustedSignatureInformation{
+		UntrustedDockerManifestDigest: untrustedDecodedContents.UntrustedDockerManifestDigest,
+		UntrustedDockerReference:      untrustedDecodedContents.UntrustedDockerReference,
+		UntrustedCreatorID:            untrustedDecodedContents.UntrustedCreatorID,
+		UntrustedTimestamp:            timestamp,
+		UntrustedShortKeyIdentifier:   shortKeyIdentifier,
+	}, nil
+}
diff --git a/vendor/github.com/containers/image/storage/storage_image.go b/vendor/github.com/containers/image/storage/storage_image.go
new file mode 100644
index 0000000..b39d2bc
--- /dev/null
+++ b/vendor/github.com/containers/image/storage/storage_image.go
@@ -0,0 +1,949 @@
+// +build !containers_image_storage_stub
+
+package storage
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"sync"
+	"sync/atomic"
+
+	"github.com/containers/image/docker/reference"
+	"github.com/containers/image/image"
+	"github.com/containers/image/internal/tmpdir"
+	"github.com/containers/image/manifest"
+	"github.com/containers/image/pkg/blobinfocache/none"
+	"github.com/containers/image/types"
+	"github.com/containers/storage"
+	"github.com/containers/storage/pkg/archive"
+	"github.com/containers/storage/pkg/ioutils"
+	digest "github.com/opencontainers/go-digest"
+	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+var (
+	// ErrBlobDigestMismatch is returned when PutBlob() is given a blob
+	// with a digest-based name that doesn't match its contents.
+	ErrBlobDigestMismatch = errors.New("blob digest mismatch")
+	// ErrBlobSizeMismatch is returned when PutBlob() is given a blob
+	// with an expected size that doesn't match the reader.
+	ErrBlobSizeMismatch = errors.New("blob size mismatch")
+	// ErrNoManifestLists is returned when GetManifest() is called
+	// with a non-nil instanceDigest.
+	ErrNoManifestLists = errors.New("manifest lists are not supported by this transport")
+	// ErrNoSuchImage is returned when we attempt to access an image which
+	// doesn't exist in the storage area.
+	ErrNoSuchImage = storage.ErrNotAnImage
+)
+
+type storageImageSource struct {
+	imageRef       storageReference
+	image          *storage.Image
+	layerPosition  map[digest.Digest]int // Where we are in reading a blob's layers
+	cachedManifest []byte                // A cached copy of the manifest, if already known, or nil
+	getBlobMutex   sync.Mutex            // Mutex to sync state for parallel GetBlob executions
+	SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice
+}
+
+type storageImageDestination struct {
+	imageRef       storageReference
+	directory      string                          // Temporary directory where we store blobs until Commit() time
+	nextTempFileID int32                           // A counter that we use for computing filenames to assign to blobs
+	manifest       []byte                          // Manifest contents, temporary
+	signatures     []byte                          // Signature contents, temporary
+	putBlobMutex   sync.Mutex                      // Mutex to sync state for parallel PutBlob executions
+	blobDiffIDs    map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs
+	fileSizes      map[digest.Digest]int64         // Mapping from layer blobsums to their sizes
+	filenames      map[digest.Digest]string        // Mapping from layer blobsums to names of files we used to hold them
+	SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice
+}
+
+type storageImageCloser struct {
+	types.ImageCloser
+	size int64
+}
+
+// manifestBigDataKey returns a key suitable for recording a manifest with the specified digest using storage.Store.ImageBigData and related functions.
+// If a specific manifest digest is explicitly requested by the user, the key returned by this function should be used preferably;
+// for compatibility, if a manifest is not available under this key, check also storage.ImageDigestBigDataKey
+func manifestBigDataKey(digest digest.Digest) string {
+	return storage.ImageDigestManifestBigDataNamePrefix + "-" + digest.String()
+}
+
+// newImageSource sets up an image for reading.
+func newImageSource(imageRef storageReference) (*storageImageSource, error) {
+	// First, locate the image.
+	img, err := imageRef.resolveImage()
+	if err != nil {
+		return nil, err
+	}
+
+	// Build the reader object.
+	image := &storageImageSource{
+		imageRef:       imageRef,
+		image:          img,
+		layerPosition:  make(map[digest.Digest]int),
+		SignatureSizes: []int{},
+	}
+	if img.Metadata != "" {
+		if err := json.Unmarshal([]byte(img.Metadata), image); err != nil {
+			return nil, errors.Wrap(err, "error decoding metadata for source image")
+		}
+	}
+	return image, nil
+}
+
+// Reference returns the image reference that we used to find this image.
+func (s *storageImageSource) Reference() types.ImageReference {
+	return s.imageRef
+}
+
+// Close cleans up any resources we tied up while reading the image.
+func (s *storageImageSource) Close() error {
+	return nil
+}
+
+// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
+func (s *storageImageSource) HasThreadSafeGetBlob() bool {
+	return true
+}
+
+// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (rc io.ReadCloser, n int64, err error) {
+	if info.Digest == image.GzippedEmptyLayerDigest {
+		return ioutil.NopCloser(bytes.NewReader(image.GzippedEmptyLayer)), int64(len(image.GzippedEmptyLayer)), nil
+	}
+	rc, n, _, err = s.getBlobAndLayerID(info)
+	return rc, n, err
+}
+
+// getBlobAndLayerID reads the data blob or filesystem layer which matches the digest and size, if given.
+func (s *storageImageSource) getBlobAndLayerID(info types.BlobInfo) (rc io.ReadCloser, n int64, layerID string, err error) {
+	var layer storage.Layer
+	var diffOptions *storage.DiffOptions
+	// We need a valid digest value.
+	err = info.Digest.Validate()
+	if err != nil {
+		return nil, -1, "", err
+	}
+	// Check if the blob corresponds to a diff that was used to initialize any layers.  Our
+	// callers should try to retrieve layers using their uncompressed digests, so no need to
+	// check if they're using one of the compressed digests, which we can't reproduce anyway.
+	layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(info.Digest)
+	// If it's not a layer, then it must be a data item.
+	if len(layers) == 0 {
+		b, err := s.imageRef.transport.store.ImageBigData(s.image.ID, info.Digest.String())
+		if err != nil {
+			return nil, -1, "", err
+		}
+		r := bytes.NewReader(b)
+		logrus.Debugf("exporting opaque data as blob %q", info.Digest.String())
+		return ioutil.NopCloser(r), int64(r.Len()), "", nil
+	}
+	// Step through the list of matching layers.  Tests may want to verify that if we have multiple layers
+	// which claim to have the same contents, that we actually do have multiple layers, otherwise we could
+	// just go ahead and use the first one every time.
+	s.getBlobMutex.Lock()
+	i := s.layerPosition[info.Digest]
+	s.layerPosition[info.Digest] = i + 1
+	s.getBlobMutex.Unlock()
+	if len(layers) > 0 {
+		layer = layers[i%len(layers)]
+	}
+	// Force the storage layer to not try to match any compression that was used when the layer was first
+	// handed to it.
+	noCompression := archive.Uncompressed
+	diffOptions = &storage.DiffOptions{
+		Compression: &noCompression,
+	}
+	if layer.UncompressedSize < 0 {
+		n = -1
+	} else {
+		n = layer.UncompressedSize
+	}
+	logrus.Debugf("exporting filesystem layer %q without compression for blob %q", layer.ID, info.Digest)
+	rc, err = s.imageRef.transport.store.Diff("", layer.ID, diffOptions)
+	if err != nil {
+		return nil, -1, "", err
+	}
+	return rc, n, layer.ID, err
+}
+
+// GetManifest() reads the image's manifest.
+func (s *storageImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) (manifestBlob []byte, MIMEType string, err error) {
+	if instanceDigest != nil {
+		return nil, "", ErrNoManifestLists
+	}
+	if len(s.cachedManifest) == 0 {
+		// The manifest is stored as a big data item.
+		// Prefer the manifest corresponding to the user-specified digest, if available.
+		if s.imageRef.named != nil {
+			if digested, ok := s.imageRef.named.(reference.Digested); ok {
+				key := manifestBigDataKey(digested.Digest())
+				blob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, key)
+				if err != nil && !os.IsNotExist(err) { // os.IsNotExist is true if the image exists but there is no data corresponding to key
+					return nil, "", err
+				}
+				if err == nil {
+					s.cachedManifest = blob
+				}
+			}
+		}
+		// If the user did not specify a digest, or this is an old image stored before manifestBigDataKey was introduced, use the default manifest.
+ // Note that the manifest may not match the expected digest, and that is likely to fail eventually, e.g. in c/image/image/UnparsedImage.Manifest(). + if len(s.cachedManifest) == 0 { + cachedBlob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, storage.ImageDigestBigDataKey) + if err != nil { + return nil, "", err + } + s.cachedManifest = cachedBlob + } + } + return s.cachedManifest, manifest.GuessMIMEType(s.cachedManifest), err +} + +// LayerInfosForCopy() returns the list of layer blobs that make up the root filesystem of +// the image, after they've been decompressed. +func (s *storageImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) { + manifestBlob, manifestType, err := s.GetManifest(ctx, nil) + if err != nil { + return nil, errors.Wrapf(err, "error reading image manifest for %q", s.image.ID) + } + man, err := manifest.FromBlob(manifestBlob, manifestType) + if err != nil { + return nil, errors.Wrapf(err, "error parsing image manifest for %q", s.image.ID) + } + + uncompressedLayerType := "" + switch manifestType { + case imgspecv1.MediaTypeImageManifest: + uncompressedLayerType = imgspecv1.MediaTypeImageLayer + case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema2MediaType: + // This is actually a compressed type, but there's no uncompressed type defined + uncompressedLayerType = manifest.DockerV2Schema2LayerMediaType + } + + physicalBlobInfos := []types.BlobInfo{} + layerID := s.image.TopLayer + for layerID != "" { + layer, err := s.imageRef.transport.store.Layer(layerID) + if err != nil { + return nil, errors.Wrapf(err, "error reading layer %q in image %q", layerID, s.image.ID) + } + if layer.UncompressedDigest == "" { + return nil, errors.Errorf("uncompressed digest for layer %q is unknown", layerID) + } + if layer.UncompressedSize < 0 { + return nil, errors.Errorf("uncompressed size for layer %q is unknown", layerID) + } + blobInfo := types.BlobInfo{ + Digest: layer.UncompressedDigest, + Size: layer.UncompressedSize, + MediaType: uncompressedLayerType, + } + physicalBlobInfos = append([]types.BlobInfo{blobInfo}, physicalBlobInfos...) + layerID = layer.Parent + } + + res, err := buildLayerInfosForCopy(man.LayerInfos(), physicalBlobInfos) + if err != nil { + return nil, errors.Wrapf(err, "error creating LayerInfosForCopy of image %q", s.image.ID) + } + return res, nil +} + +// buildLayerInfosForCopy builds a LayerInfosForCopy return value based on manifestInfos from the original manifest, +// but using layer data which we can actually produce — physicalInfos for non-empty layers, +// and image.GzippedEmptyLayer for empty ones. +// (This is split basically only to allow easily unit-testing the part that has no dependencies on the external environment.) 
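A worked example of the mapping this function performs (layer names illustrative): given

    manifestInfos: [L0, empty, L1, empty]   (EmptyLayer flags come from the manifest history)
    physicalInfos: [P0, P1]                 (uncompressed layers actually present in storage)

the result is [P0, gzippedEmpty, P1, gzippedEmpty]: each non-empty slot consumes the next physical layer in order, and a leftover or missing physical layer is reported as an error.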
+func buildLayerInfosForCopy(manifestInfos []manifest.LayerInfo, physicalInfos []types.BlobInfo) ([]types.BlobInfo, error) {
+	nextPhysical := 0
+	res := make([]types.BlobInfo, len(manifestInfos))
+	for i, mi := range manifestInfos {
+		if mi.EmptyLayer {
+			res[i] = types.BlobInfo{
+				Digest:    image.GzippedEmptyLayerDigest,
+				Size:      int64(len(image.GzippedEmptyLayer)),
+				MediaType: mi.MediaType,
+			}
+		} else {
+			if nextPhysical >= len(physicalInfos) {
+				return nil, fmt.Errorf("expected more than %d physical layers to exist", len(physicalInfos))
+			}
+			res[i] = physicalInfos[nextPhysical]
+			nextPhysical++
+		}
+	}
+	if nextPhysical != len(physicalInfos) {
+		return nil, fmt.Errorf("used only %d out of %d physical layers", nextPhysical, len(physicalInfos))
+	}
+	return res, nil
+}
+
+// GetSignatures() parses the image's signatures blob into a slice of byte slices.
+func (s *storageImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) (signatures [][]byte, err error) {
+	if instanceDigest != nil {
+		return nil, ErrNoManifestLists
+	}
+	var offset int
+	sigslice := [][]byte{}
+	signature := []byte{}
+	if len(s.SignatureSizes) > 0 {
+		signatureBlob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, "signatures")
+		if err != nil {
+			return nil, errors.Wrapf(err, "error looking up signatures data for image %q", s.image.ID)
+		}
+		signature = signatureBlob
+	}
+	for _, length := range s.SignatureSizes {
+		sigslice = append(sigslice, signature[offset:offset+length])
+		offset += length
+	}
+	if offset != len(signature) {
+		return nil, errors.Errorf("signatures data contained %d extra bytes", len(signature)-offset)
+	}
+	return sigslice, nil
+}
+
+// newImageDestination sets us up to write a new image, caching blobs in a temporary directory until
+// it's time to Commit() the image
+func newImageDestination(imageRef storageReference) (*storageImageDestination, error) {
+	directory, err := ioutil.TempDir(tmpdir.TemporaryDirectoryForBigFiles(), "storage")
+	if err != nil {
+		return nil, errors.Wrapf(err, "error creating a temporary directory")
+	}
+	image := &storageImageDestination{
+		imageRef:       imageRef,
+		directory:      directory,
+		blobDiffIDs:    make(map[digest.Digest]digest.Digest),
+		fileSizes:      make(map[digest.Digest]int64),
+		filenames:      make(map[digest.Digest]string),
+		SignatureSizes: []int{},
+	}
+	return image, nil
+}
+
+// Reference returns the reference used to set up this destination.  Note that this should directly correspond to user's intent,
+// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
+func (s *storageImageDestination) Reference() types.ImageReference {
+	return s.imageRef
+}
+
+// Close cleans up the temporary directory.
+func (s *storageImageDestination) Close() error {
+	return os.RemoveAll(s.directory)
+}
+
+func (s *storageImageDestination) DesiredLayerCompression() types.LayerCompression {
+	// We ultimately have to decompress layers to populate trees on disk,
+	// so callers shouldn't bother compressing them before handing them to
+	// us, if they're not already compressed.
+	return types.PreserveOriginal
+}
+
+func (s *storageImageDestination) computeNextBlobCacheFile() string {
+	return filepath.Join(s.directory, fmt.Sprintf("%d", atomic.AddInt32(&s.nextTempFileID, 1)))
+}
+
+// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
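GetSignatures above and PutSignatures later in this file share a deliberately simple encoding: signature bytes are concatenated into a single "signatures" big-data item and the per-signature lengths are kept in SignatureSizes. A sketch of the round trip (append-based, equivalent to the copy loop PutSignatures actually uses; sigs is a hypothetical [][]byte):

    sizes, blob := []int{}, []byte{}
    for _, sig := range sigs { // encode: e.g. sizes [391, 394] -> one 785-byte blob
        sizes = append(sizes, len(sig))
        blob = append(blob, sig...)
    }
    out, offset := [][]byte{}, 0
    for _, length := range sizes { // decode: slice apart at offsets 0, 391, 785
        out = append(out, blob[offset:offset+length])
        offset += length
    }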
+func (s *storageImageDestination) HasThreadSafePutBlob() bool { + return true +} + +// PutBlob writes contents of stream and returns data representing the result. +// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. +// inputInfo.Size is the expected length of stream, if known. +// inputInfo.MediaType describes the blob format, if known. +// May update cache. +// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available +// to any other readers for download using the supplied digest. +// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. +func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { + // Stores a layer or data blob in our temporary directory, checking that any information + // in the blobinfo matches the incoming data. + errorBlobInfo := types.BlobInfo{ + Digest: "", + Size: -1, + } + // Set up to digest the blob and count its size while saving it to a file. + hasher := digest.Canonical.Digester() + if blobinfo.Digest.Validate() == nil { + if a := blobinfo.Digest.Algorithm(); a.Available() { + hasher = a.Digester() + } + } + diffID := digest.Canonical.Digester() + filename := s.computeNextBlobCacheFile() + file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0600) + if err != nil { + return errorBlobInfo, errors.Wrapf(err, "error creating temporary file %q", filename) + } + defer file.Close() + counter := ioutils.NewWriteCounter(hasher.Hash()) + reader := io.TeeReader(io.TeeReader(stream, counter), file) + decompressed, err := archive.DecompressStream(reader) + if err != nil { + return errorBlobInfo, errors.Wrap(err, "error setting up to decompress blob") + } + // Copy the data to the file. + // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). + _, err = io.Copy(diffID.Hash(), decompressed) + decompressed.Close() + if err != nil { + return errorBlobInfo, errors.Wrapf(err, "error storing blob to file %q", filename) + } + // Ensure that any information that we were given about the blob is correct. + if blobinfo.Digest.Validate() == nil && blobinfo.Digest != hasher.Digest() { + return errorBlobInfo, ErrBlobDigestMismatch + } + if blobinfo.Size >= 0 && blobinfo.Size != counter.Count { + return errorBlobInfo, ErrBlobSizeMismatch + } + // Record information about the blob. + s.putBlobMutex.Lock() + s.blobDiffIDs[hasher.Digest()] = diffID.Digest() + s.fileSizes[hasher.Digest()] = counter.Count + s.filenames[hasher.Digest()] = filename + s.putBlobMutex.Unlock() + blobDigest := blobinfo.Digest + if blobDigest.Validate() != nil { + blobDigest = hasher.Digest() + } + blobSize := blobinfo.Size + if blobSize < 0 { + blobSize = counter.Count + } + // This is safe because we have just computed both values ourselves. + cache.RecordDigestUncompressedPair(blobDigest, diffID.Digest()) + return types.BlobInfo{ + Digest: blobDigest, + Size: blobSize, + MediaType: blobinfo.MediaType, + }, nil +} + +// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination +// (e.g. 
if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
+func (s *storageImageDestination) TryReusingBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
+	// lock the entire method as it executes fairly quickly
+	s.putBlobMutex.Lock()
+	defer s.putBlobMutex.Unlock()
+	if blobinfo.Digest == "" {
+		return false, types.BlobInfo{}, errors.Errorf(`Can not check for a blob with unknown digest`)
+	}
+	if err := blobinfo.Digest.Validate(); err != nil {
+		return false, types.BlobInfo{}, errors.Wrapf(err, `Can not check for a blob with invalid digest`)
+	}
+
+	// Check if we've already cached it in a file.
+	if size, ok := s.fileSizes[blobinfo.Digest]; ok {
+		return true, types.BlobInfo{
+			Digest:    blobinfo.Digest,
+			Size:      size,
+			MediaType: blobinfo.MediaType,
+		}, nil
+	}
+
+	// Check if we have a wasn't-compressed layer in storage that's based on that blob.
+	layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(blobinfo.Digest)
+	if err != nil && errors.Cause(err) != storage.ErrLayerUnknown {
+		return false, types.BlobInfo{}, errors.Wrapf(err, `Error looking for layers with digest %q`, blobinfo.Digest)
+	}
+	if len(layers) > 0 {
+		// Save this for completeness.
+		s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest
+		return true, types.BlobInfo{
+			Digest:    blobinfo.Digest,
+			Size:      layers[0].UncompressedSize,
+			MediaType: blobinfo.MediaType,
+		}, nil
+	}
+
+	// Check if we have a was-compressed layer in storage that's based on that blob.
+	layers, err = s.imageRef.transport.store.LayersByCompressedDigest(blobinfo.Digest)
+	if err != nil && errors.Cause(err) != storage.ErrLayerUnknown {
+		return false, types.BlobInfo{}, errors.Wrapf(err, `Error looking for compressed layers with digest %q`, blobinfo.Digest)
+	}
+	if len(layers) > 0 {
+		// Record the uncompressed value so that we can use it to calculate layer IDs.
+		s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest
+		return true, types.BlobInfo{
+			Digest:    blobinfo.Digest,
+			Size:      layers[0].CompressedSize,
+			MediaType: blobinfo.MediaType,
+		}, nil
+	}
+
+	// Does the blob correspond to a known DiffID which we already have available?
+	// Because we must return the size, which is unknown for unavailable compressed blobs, the returned BlobInfo refers to the
+	// uncompressed layer, and that can happen only if canSubstitute.
+ if canSubstitute { + if uncompressedDigest := cache.UncompressedDigest(blobinfo.Digest); uncompressedDigest != "" && uncompressedDigest != blobinfo.Digest { + layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(uncompressedDigest) + if err != nil && errors.Cause(err) != storage.ErrLayerUnknown { + return false, types.BlobInfo{}, errors.Wrapf(err, `Error looking for layers with digest %q`, uncompressedDigest) + } + if len(layers) > 0 { + s.blobDiffIDs[uncompressedDigest] = layers[0].UncompressedDigest + return true, types.BlobInfo{ + Digest: uncompressedDigest, + Size: layers[0].UncompressedSize, + MediaType: blobinfo.MediaType, + }, nil + } + } + } + + // Nope, we don't have it. + return false, types.BlobInfo{}, nil +} + +// computeID computes a recommended image ID based on information we have so far. If +// the manifest is not of a type that we recognize, we return an empty value, indicating +// that since we don't have a recommendation, a random ID should be used if one needs +// to be allocated. +func (s *storageImageDestination) computeID(m manifest.Manifest) string { + // Build the diffID list. We need the decompressed sums that we've been calculating to + // fill in the DiffIDs. It's expected (but not enforced by us) that the number of + // diffIDs corresponds to the number of non-EmptyLayer entries in the history. + var diffIDs []digest.Digest + switch m := m.(type) { + case *manifest.Schema1: + // Build a list of the diffIDs we've generated for the non-throwaway FS layers, + // in reverse of the order in which they were originally listed. + for i, compat := range m.ExtractedV1Compatibility { + if compat.ThrowAway { + continue + } + blobSum := m.FSLayers[i].BlobSum + diffID, ok := s.blobDiffIDs[blobSum] + if !ok { + logrus.Infof("error looking up diffID for layer %q", blobSum.String()) + return "" + } + diffIDs = append([]digest.Digest{diffID}, diffIDs...) + } + case *manifest.Schema2, *manifest.OCI1: + // We know the ID calculation for these formats doesn't actually use the diffIDs, + // so we don't need to populate the diffID list. + default: + return "" + } + id, err := m.ImageID(diffIDs) + if err != nil { + return "" + } + return id +} + +// getConfigBlob exists only to let us retrieve the configuration blob so that the manifest package can dig +// information out of it for Inspect(). +func (s *storageImageDestination) getConfigBlob(info types.BlobInfo) ([]byte, error) { + if info.Digest == "" { + return nil, errors.Errorf(`no digest supplied when reading blob`) + } + if err := info.Digest.Validate(); err != nil { + return nil, errors.Wrapf(err, `invalid digest supplied when reading blob`) + } + // Assume it's a file, since we're only calling this from a place that expects to read files. + if filename, ok := s.filenames[info.Digest]; ok { + contents, err2 := ioutil.ReadFile(filename) + if err2 != nil { + return nil, errors.Wrapf(err2, `error reading blob from file %q`, filename) + } + return contents, nil + } + // If it's not a file, it's a bug, because we're not expecting to be asked for a layer. + return nil, errors.New("blob not found") +} + +func (s *storageImageDestination) Commit(ctx context.Context) error { + // Find the list of layer blobs. 
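The layer IDs computed in the loop below are deterministic: the bottom layer's ID is just its DiffID's hex, and every subsequent ID chains the parent's ID with the next DiffID, so the same stack of diffs always resolves to the same storage layer ID. Schematically (digest.Canonical is sha256):

    id(0) = diffID(0).Hex()
    id(n) = sha256(id(n-1) + "+" + diffID(n).Hex()).Hex()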
+	if len(s.manifest) == 0 {
+		return errors.New("Internal error: storageImageDestination.Commit() called without PutManifest()")
+	}
+	man, err := manifest.FromBlob(s.manifest, manifest.GuessMIMEType(s.manifest))
+	if err != nil {
+		return errors.Wrapf(err, "error parsing manifest")
+	}
+	layerBlobs := man.LayerInfos()
+	// Extract or find the layers.
+	lastLayer := ""
+	for _, blob := range layerBlobs {
+		if blob.EmptyLayer {
+			continue
+		}
+
+		// Check if there's already a layer with the ID that we'd give to the result of applying
+		// this layer blob to its parent, if it has one, or the blob's hex value otherwise.
+		diffID, haveDiffID := s.blobDiffIDs[blob.Digest]
+		if !haveDiffID {
+			// Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob(),
+			// or to even check if we had it.
+			// Use none.NoCache to avoid a repeated DiffID lookup in the BlobInfoCache; a caller
+			// that relies on using a blob digest that has never been seen by the store had better call
+			// TryReusingBlob; not calling PutBlob already violates the documented API, so there’s only
+			// so far we are going to accommodate that (if we should be doing that at all).
+			logrus.Debugf("looking for diffID for blob %+v", blob.Digest)
+			has, _, err := s.TryReusingBlob(ctx, blob.BlobInfo, none.NoCache, false)
+			if err != nil {
+				return errors.Wrapf(err, "error checking for a layer based on blob %q", blob.Digest.String())
+			}
+			if !has {
+				return errors.Errorf("error determining uncompressed digest for blob %q", blob.Digest.String())
+			}
+			diffID, haveDiffID = s.blobDiffIDs[blob.Digest]
+			if !haveDiffID {
+				return errors.Errorf("we have blob %q, but don't know its uncompressed digest", blob.Digest.String())
+			}
+		}
+		id := diffID.Hex()
+		if lastLayer != "" {
+			id = digest.Canonical.FromBytes([]byte(lastLayer + "+" + diffID.Hex())).Hex()
+		}
+		if layer, err2 := s.imageRef.transport.store.Layer(id); layer != nil && err2 == nil {
+			// There's already a layer that should have the right contents, just reuse it.
+			lastLayer = layer.ID
+			continue
+		}
+		// Check if we previously cached a file with that blob's contents.  If we didn't,
+		// then we need to read the desired contents from a layer.
+		filename, ok := s.filenames[blob.Digest]
+		if !ok {
+			// Try to find the layer with contents matching that blobsum.
+			layer := ""
+			layers, err2 := s.imageRef.transport.store.LayersByUncompressedDigest(blob.Digest)
+			if err2 == nil && len(layers) > 0 {
+				layer = layers[0].ID
+			} else {
+				layers, err2 = s.imageRef.transport.store.LayersByCompressedDigest(blob.Digest)
+				if err2 == nil && len(layers) > 0 {
+					layer = layers[0].ID
+				}
+			}
+			if layer == "" {
+				return errors.Wrapf(err2, "error locating layer for blob %q", blob.Digest)
+			}
+			// Read the layer's contents.
+			noCompression := archive.Uncompressed
+			diffOptions := &storage.DiffOptions{
+				Compression: &noCompression,
+			}
+			diff, err2 := s.imageRef.transport.store.Diff("", layer, diffOptions)
+			if err2 != nil {
+				return errors.Wrapf(err2, "error reading layer %q for blob %q", layer, blob.Digest)
+			}
+			// Copy the layer diff to a file.  Diff() takes a lock that it holds
+			// until the ReadCloser that it returns is closed, and PutLayer() wants
+			// the same lock, so the diff can't just be directly streamed from one
+			// to the other.
+ filename = s.computeNextBlobCacheFile() + file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0600) + if err != nil { + diff.Close() + return errors.Wrapf(err, "error creating temporary file %q", filename) + } + // Copy the data to the file. + // TODO: This can take quite some time, and should ideally be cancellable using + // ctx.Done(). + _, err = io.Copy(file, diff) + diff.Close() + file.Close() + if err != nil { + return errors.Wrapf(err, "error storing blob to file %q", filename) + } + // Make sure that we can find this file later, should we need the layer's + // contents again. + s.filenames[blob.Digest] = filename + } + // Read the cached blob and use it as a diff. + file, err := os.Open(filename) + if err != nil { + return errors.Wrapf(err, "error opening file %q", filename) + } + defer file.Close() + // Build the new layer using the diff, regardless of where it came from. + // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). + layer, _, err := s.imageRef.transport.store.PutLayer(id, lastLayer, nil, "", false, nil, file) + if err != nil && errors.Cause(err) != storage.ErrDuplicateID { + return errors.Wrapf(err, "error adding layer with blob %q", blob.Digest) + } + lastLayer = layer.ID + } + + // If one of those blobs was a configuration blob, then we can try to dig out the date when the image + // was originally created, in case we're just copying it. If not, no harm done. + options := &storage.ImageOptions{} + if inspect, err := man.Inspect(s.getConfigBlob); err == nil && inspect.Created != nil { + logrus.Debugf("setting image creation date to %s", inspect.Created) + options.CreationDate = *inspect.Created + } + // Create the image record, pointing to the most-recently added layer. + intendedID := s.imageRef.id + if intendedID == "" { + intendedID = s.computeID(man) + } + oldNames := []string{} + img, err := s.imageRef.transport.store.CreateImage(intendedID, nil, lastLayer, "", options) + if err != nil { + if errors.Cause(err) != storage.ErrDuplicateID { + logrus.Debugf("error creating image: %q", err) + return errors.Wrapf(err, "error creating image %q", intendedID) + } + img, err = s.imageRef.transport.store.Image(intendedID) + if err != nil { + return errors.Wrapf(err, "error reading image %q", intendedID) + } + if img.TopLayer != lastLayer { + logrus.Debugf("error creating image: image with ID %q exists, but uses different layers", intendedID) + return errors.Wrapf(storage.ErrDuplicateID, "image with ID %q already exists, but uses a different top layer", intendedID) + } + logrus.Debugf("reusing image ID %q", img.ID) + oldNames = append(oldNames, img.Names...) + } else { + logrus.Debugf("created new image ID %q", img.ID) + } + // Add the non-layer blobs as data items. Since we only share layers, they should all be in files, so + // we just need to screen out the ones that are actually layers to get the list of non-layers. 
+ dataBlobs := make(map[digest.Digest]struct{}) + for blob := range s.filenames { + dataBlobs[blob] = struct{}{} + } + for _, layerBlob := range layerBlobs { + delete(dataBlobs, layerBlob.Digest) + } + for blob := range dataBlobs { + v, err := ioutil.ReadFile(s.filenames[blob]) + if err != nil { + return errors.Wrapf(err, "error copying non-layer blob %q to image", blob) + } + if err := s.imageRef.transport.store.SetImageBigData(img.ID, blob.String(), v, manifest.Digest); err != nil { + if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { + logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) + } + logrus.Debugf("error saving big data %q for image %q: %v", blob.String(), img.ID, err) + return errors.Wrapf(err, "error saving big data %q for image %q", blob.String(), img.ID) + } + } + // Set the reference's name on the image. + if name := s.imageRef.DockerReference(); len(oldNames) > 0 || name != nil { + names := []string{} + if name != nil { + names = append(names, name.String()) + } + if len(oldNames) > 0 { + names = append(names, oldNames...) + } + if err := s.imageRef.transport.store.SetNames(img.ID, names); err != nil { + if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { + logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) + } + logrus.Debugf("error setting names %v on image %q: %v", names, img.ID, err) + return errors.Wrapf(err, "error setting names %v on image %q", names, img.ID) + } + logrus.Debugf("set names of image %q to %v", img.ID, names) + } + // Save the manifest. Allow looking it up by digest by using the key convention defined by the Store. + // Record the manifest twice: using a digest-specific key to allow references to that specific digest instance, + // and using storage.ImageDigestBigDataKey for future users that don’t specify any digest and for compatibility with older readers. + manifestDigest, err := manifest.Digest(s.manifest) + if err != nil { + return errors.Wrapf(err, "error computing manifest digest") + } + if err := s.imageRef.transport.store.SetImageBigData(img.ID, manifestBigDataKey(manifestDigest), s.manifest, manifest.Digest); err != nil { + if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { + logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) + } + logrus.Debugf("error saving manifest for image %q: %v", img.ID, err) + return err + } + if err := s.imageRef.transport.store.SetImageBigData(img.ID, storage.ImageDigestBigDataKey, s.manifest, manifest.Digest); err != nil { + if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { + logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) + } + logrus.Debugf("error saving manifest for image %q: %v", img.ID, err) + return err + } + // Save the signatures, if we have any. + if len(s.signatures) > 0 { + if err := s.imageRef.transport.store.SetImageBigData(img.ID, "signatures", s.signatures, manifest.Digest); err != nil { + if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { + logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) + } + logrus.Debugf("error saving signatures for image %q: %v", img.ID, err) + return err + } + } + // Save our metadata. 
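The metadata saved at the end of Commit() below is simply this destination object marshalled through its json tags; since SignatureSizes is the only tagged field, it comes out as, for example, {"signature-sizes":[391,394]} when signatures were stored (sizes illustrative). newImageSource unmarshals the same blob back into a storageImageSource.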
+ metadata, err := json.Marshal(s) + if err != nil { + if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { + logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) + } + logrus.Debugf("error encoding metadata for image %q: %v", img.ID, err) + return err + } + if len(metadata) != 0 { + if err = s.imageRef.transport.store.SetMetadata(img.ID, string(metadata)); err != nil { + if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { + logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) + } + logrus.Debugf("error saving metadata for image %q: %v", img.ID, err) + return err + } + logrus.Debugf("saved image metadata %q", string(metadata)) + } + return nil +} + +var manifestMIMETypes = []string{ + imgspecv1.MediaTypeImageManifest, + manifest.DockerV2Schema2MediaType, + manifest.DockerV2Schema1SignedMediaType, + manifest.DockerV2Schema1MediaType, +} + +func (s *storageImageDestination) SupportedManifestMIMETypes() []string { + return manifestMIMETypes +} + +// PutManifest writes the manifest to the destination. +func (s *storageImageDestination) PutManifest(ctx context.Context, manifestBlob []byte) error { + if s.imageRef.named != nil { + if digested, ok := s.imageRef.named.(reference.Digested); ok { + matches, err := manifest.MatchesDigest(manifestBlob, digested.Digest()) + if err != nil { + return err + } + if !matches { + return fmt.Errorf("Manifest does not match expected digest %s", digested.Digest()) + } + } + } + + s.manifest = make([]byte, len(manifestBlob)) + copy(s.manifest, manifestBlob) + return nil +} + +// SupportsSignatures returns an error if we can't expect GetSignatures() to return data that was +// previously supplied to PutSignatures(). +func (s *storageImageDestination) SupportsSignatures(ctx context.Context) error { + return nil +} + +// AcceptsForeignLayerURLs returns false iff foreign layers in the manifest should actually be +// uploaded to the image destination, true otherwise. +func (s *storageImageDestination) AcceptsForeignLayerURLs() bool { + return false +} + +// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise. +func (s *storageImageDestination) MustMatchRuntimeOS() bool { + return true +} + +// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(), +// and would prefer to receive an unmodified manifest instead of one modified for the destination. +// Does not make a difference if Reference().DockerReference() is nil. +func (s *storageImageDestination) IgnoresEmbeddedDockerReference() bool { + return true // Yes, we want the unmodified manifest +} + +// PutSignatures records the image's signatures for committing as a single data blob. +func (s *storageImageDestination) PutSignatures(ctx context.Context, signatures [][]byte) error { + sizes := []int{} + sigblob := []byte{} + for _, sig := range signatures { + sizes = append(sizes, len(sig)) + newblob := make([]byte, len(sigblob)+len(sig)) + copy(newblob, sigblob) + copy(newblob[len(sigblob):], sig) + sigblob = newblob + } + s.signatures = sigblob + s.SignatureSizes = sizes + return nil +} + +// getSize() adds up the sizes of the image's data blobs (which includes the configuration blob), the +// signatures, and the uncompressed sizes of all of the image's layers. +func (s *storageImageSource) getSize() (int64, error) { + var sum int64 + // Size up the data blobs. 
+ dataNames, err := s.imageRef.transport.store.ListImageBigData(s.image.ID) + if err != nil { + return -1, errors.Wrapf(err, "error reading image %q", s.image.ID) + } + for _, dataName := range dataNames { + bigSize, err := s.imageRef.transport.store.ImageBigDataSize(s.image.ID, dataName) + if err != nil { + return -1, errors.Wrapf(err, "error reading data blob size %q for %q", dataName, s.image.ID) + } + sum += bigSize + } + // Add the signature sizes. + for _, sigSize := range s.SignatureSizes { + sum += int64(sigSize) + } + // Walk the layer list. + layerID := s.image.TopLayer + for layerID != "" { + layer, err := s.imageRef.transport.store.Layer(layerID) + if err != nil { + return -1, err + } + if layer.UncompressedDigest == "" || layer.UncompressedSize < 0 { + return -1, errors.Errorf("size for layer %q is unknown, failing getSize()", layerID) + } + sum += layer.UncompressedSize + if layer.Parent == "" { + break + } + layerID = layer.Parent + } + return sum, nil +} + +// Size() adds up the sizes of the image's data blobs (which includes the configuration blob), the +// signatures, and the uncompressed sizes of all of the image's layers. +func (s *storageImageSource) Size() (int64, error) { + return s.getSize() +} + +// Size() returns the previously-computed size of the image, with no error. +func (s *storageImageCloser) Size() (int64, error) { + return s.size, nil +} + +// newImage creates an image that also knows its size +func newImage(ctx context.Context, sys *types.SystemContext, s storageReference) (types.ImageCloser, error) { + src, err := newImageSource(s) + if err != nil { + return nil, err + } + img, err := image.FromSource(ctx, sys, src) + if err != nil { + return nil, err + } + size, err := src.getSize() + if err != nil { + return nil, err + } + return &storageImageCloser{ImageCloser: img, size: size}, nil +} diff --git a/vendor/github.com/containers/image/storage/storage_reference.go b/vendor/github.com/containers/image/storage/storage_reference.go new file mode 100644 index 0000000..c046d9f --- /dev/null +++ b/vendor/github.com/containers/image/storage/storage_reference.go @@ -0,0 +1,225 @@ +// +build !containers_image_storage_stub + +package storage + +import ( + "context" + "strings" + + "github.com/containers/image/docker/reference" + "github.com/containers/image/types" + "github.com/containers/storage" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// A storageReference holds an arbitrary name and/or an ID, which is a 32-byte +// value hex-encoded into a 64-character string, and a reference to a Store +// where an image is, or would be, kept. +// Either "named" or "id" must be set. +type storageReference struct { + transport storageTransport + named reference.Named // may include a tag and/or a digest + id string +} + +func newReference(transport storageTransport, named reference.Named, id string) (*storageReference, error) { + if named == nil && id == "" { + return nil, ErrInvalidReference + } + // We take a copy of the transport, which contains a pointer to the + // store that it used for resolving this reference, so that the + // transport that we'll return from Transport() won't be affected by + // further calls to the original transport's SetStore() method. 
+ return &storageReference{ + transport: transport, + named: named, + id: id, + }, nil +} + +// imageMatchesRepo returns true iff image.Names contains an element with the same repo as ref +func imageMatchesRepo(image *storage.Image, ref reference.Named) bool { + repo := ref.Name() + for _, name := range image.Names { + if named, err := reference.ParseNormalizedNamed(name); err == nil { + if named.Name() == repo { + return true + } + } + } + return false +} + +// Resolve the reference's name to an image ID in the store, if there's already +// one present with the same name or ID, and return the image. +func (s *storageReference) resolveImage() (*storage.Image, error) { + var loadedImage *storage.Image + if s.id == "" && s.named != nil { + // Look for an image that has the expanded reference name as an explicit Name value. + image, err := s.transport.store.Image(s.named.String()) + if image != nil && err == nil { + loadedImage = image + s.id = image.ID + } + } + if s.id == "" && s.named != nil { + if digested, ok := s.named.(reference.Digested); ok { + // Look for an image with the specified digest that has the same name, + // though possibly with a different tag or digest, as a Name value, so + // that the canonical reference can be implicitly resolved to the image. + images, err := s.transport.store.ImagesByDigest(digested.Digest()) + if err == nil && len(images) > 0 { + for _, image := range images { + if imageMatchesRepo(image, s.named) { + loadedImage = image + s.id = image.ID + break + } + } + } + } + } + if s.id == "" { + logrus.Debugf("reference %q does not resolve to an image ID", s.StringWithinTransport()) + return nil, errors.Wrapf(ErrNoSuchImage, "reference %q does not resolve to an image ID", s.StringWithinTransport()) + } + if loadedImage == nil { + img, err := s.transport.store.Image(s.id) + if err != nil { + return nil, errors.Wrapf(err, "error reading image %q", s.id) + } + loadedImage = img + } + if s.named != nil { + if !imageMatchesRepo(loadedImage, s.named) { + logrus.Errorf("no image matching reference %q found", s.StringWithinTransport()) + return nil, ErrNoSuchImage + } + } + // Default to having the image digest that we hand back match the most recently + // added manifest... + if digest, ok := loadedImage.BigDataDigests[storage.ImageDigestBigDataKey]; ok { + loadedImage.Digest = digest + } + // ... unless the named reference says otherwise, and it matches one of the digests + // in the image. For those cases, set the Digest field to that value, for the + // sake of older consumers that don't know there's a whole list in there now. + if s.named != nil { + if digested, ok := s.named.(reference.Digested); ok { + for _, digest := range loadedImage.Digests { + if digest == digested.Digest() { + loadedImage.Digest = digest + break + } + } + } + } + return loadedImage, nil +} + +// Return a Transport object that defaults to using the same store that we used +// to build this reference object. +func (s storageReference) Transport() types.ImageTransport { + return &storageTransport{ + store: s.transport.store, + defaultUIDMap: s.transport.defaultUIDMap, + defaultGIDMap: s.transport.defaultGIDMap, + } +} + +// Return a name with a tag or digest, if we have either, else return it bare. +func (s storageReference) DockerReference() reference.Named { + return s.named +} + +// Return a name with a tag, prefixed with the graph root and driver name, to +// disambiguate between images which may be present in multiple stores and +// share only their names. 
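Concretely, for a tagged name plus an image ID on an overlay store (paths and ID illustrative), StringWithinTransport below produces

    [overlay@/var/lib/containers/storage+/run/containers/storage]docker.io/library/busybox:latest@<64-hex-image-id>

while PolicyConfigurationIdentity keeps only the "[driver@graphroot]" prefix before the name and ID, dropping the run root and any graph options.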
+func (s storageReference) StringWithinTransport() string { + optionsList := "" + options := s.transport.store.GraphOptions() + if len(options) > 0 { + optionsList = ":" + strings.Join(options, ",") + } + res := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "+" + s.transport.store.RunRoot() + optionsList + "]" + if s.named != nil { + res = res + s.named.String() + } + if s.id != "" { + res = res + "@" + s.id + } + return res +} + +func (s storageReference) PolicyConfigurationIdentity() string { + res := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "]" + if s.named != nil { + res = res + s.named.String() + } + if s.id != "" { + res = res + "@" + s.id + } + return res +} + +// Also accept policy that's tied to the combination of the graph root and +// driver name, to apply to all images stored in the Store, and to just the +// graph root, in case we're using multiple drivers in the same directory for +// some reason. +func (s storageReference) PolicyConfigurationNamespaces() []string { + storeSpec := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "]" + driverlessStoreSpec := "[" + s.transport.store.GraphRoot() + "]" + namespaces := []string{} + if s.named != nil { + if s.id != "" { + // The reference without the ID is also a valid namespace. + namespaces = append(namespaces, storeSpec+s.named.String()) + } + tagged, isTagged := s.named.(reference.Tagged) + _, isDigested := s.named.(reference.Digested) + if isTagged && isDigested { // s.named is "name:tag@digest"; add a "name:tag" parent namespace. + namespaces = append(namespaces, storeSpec+s.named.Name()+":"+tagged.Tag()) + } + components := strings.Split(s.named.Name(), "/") + for len(components) > 0 { + namespaces = append(namespaces, storeSpec+strings.Join(components, "/")) + components = components[:len(components)-1] + } + } + namespaces = append(namespaces, storeSpec) + namespaces = append(namespaces, driverlessStoreSpec) + return namespaces +} + +// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. +// The caller must call .Close() on the returned ImageCloser. +// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, +// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. +// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. 
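A worked example of the search list PolicyConfigurationNamespaces above produces for [overlay@/var/lib/containers/storage]docker.io/library/busybox:latest@<id>, from most to least specific:

    [overlay@/var/lib/containers/storage]docker.io/library/busybox:latest
    [overlay@/var/lib/containers/storage]docker.io/library/busybox
    [overlay@/var/lib/containers/storage]docker.io/library
    [overlay@/var/lib/containers/storage]docker.io
    [overlay@/var/lib/containers/storage]
    [/var/lib/containers/storage]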
+func (s storageReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
+	return newImage(ctx, sys, s)
+}
+
+func (s storageReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
+	img, err := s.resolveImage()
+	if err != nil {
+		return err
+	}
+	layers, err := s.transport.store.DeleteImage(img.ID, true)
+	if err == nil {
+		logrus.Debugf("deleted image %q", img.ID)
+		for _, layer := range layers {
+			logrus.Debugf("deleted layer %q", layer)
+		}
+	}
+	return err
+}
+
+func (s storageReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
+	return newImageSource(s)
+}
+
+func (s storageReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
+	return newImageDestination(s)
+}
diff --git a/vendor/github.com/containers/image/storage/storage_transport.go b/vendor/github.com/containers/image/storage/storage_transport.go
new file mode 100644
index 0000000..c9a05e6
--- /dev/null
+++ b/vendor/github.com/containers/image/storage/storage_transport.go
@@ -0,0 +1,366 @@
+// +build !containers_image_storage_stub
+
+package storage
+
+import (
+	"fmt"
+	"path/filepath"
+	"strings"
+
+	"github.com/pkg/errors"
+
+	"github.com/containers/image/docker/reference"
+	"github.com/containers/image/transports"
+	"github.com/containers/image/types"
+	"github.com/containers/storage"
+	"github.com/containers/storage/pkg/idtools"
+	digest "github.com/opencontainers/go-digest"
+	"github.com/sirupsen/logrus"
+)
+
+const (
+	minimumTruncatedIDLength = 3
+)
+
+func init() {
+	transports.Register(Transport)
+}
+
+var (
+	// Transport is an ImageTransport that uses either a default
+	// storage.Store or one that it's explicitly told to use.
+	Transport StoreTransport = &storageTransport{}
+	// ErrInvalidReference is returned when ParseReference() is passed an
+	// empty reference.
+	ErrInvalidReference = errors.New("invalid reference")
+	// ErrPathNotAbsolute is returned when a graph root is not an absolute
+	// path name.
+	ErrPathNotAbsolute = errors.New("path name is not absolute")
+)
+
+// StoreTransport is an ImageTransport that uses a storage.Store to parse
+// references, either its own default or one that it's told to use.
+type StoreTransport interface {
+	types.ImageTransport
+	// SetStore sets the default store for this transport.
+	SetStore(storage.Store)
+	// GetImage retrieves the image from the transport's store that's named
+	// by the reference.
+	GetImage(types.ImageReference) (*storage.Image, error)
+	// GetStoreImage retrieves the image from a specified store that's named
+	// by the reference.
+	GetStoreImage(storage.Store, types.ImageReference) (*storage.Image, error)
+	// ParseStoreReference parses a reference, overriding any store
+	// specification that it may contain.
+	ParseStoreReference(store storage.Store, reference string) (*storageReference, error)
+	// SetDefaultUIDMap sets the default UID map to use when opening stores.
+	SetDefaultUIDMap(idmap []idtools.IDMap)
+	// SetDefaultGIDMap sets the default GID map to use when opening stores.
+	SetDefaultGIDMap(idmap []idtools.IDMap)
+	// DefaultUIDMap returns the default UID map used when opening stores.
+	DefaultUIDMap() []idtools.IDMap
+	// DefaultGIDMap returns the default GID map used when opening stores.
+ DefaultGIDMap() []idtools.IDMap +} + +type storageTransport struct { + store storage.Store + defaultUIDMap []idtools.IDMap + defaultGIDMap []idtools.IDMap +} + +func (s *storageTransport) Name() string { + // Still haven't really settled on a name. + return "containers-storage" +} + +// SetStore sets the Store object which the Transport will use for parsing +// references when information about a Store is not directly specified as part +// of the reference. If one is not set, the library will attempt to initialize +// one with default settings when a reference needs to be parsed. Calling +// SetStore does not affect previously parsed references. +func (s *storageTransport) SetStore(store storage.Store) { + s.store = store +} + +// SetDefaultUIDMap sets the default UID map to use when opening stores. +func (s *storageTransport) SetDefaultUIDMap(idmap []idtools.IDMap) { + s.defaultUIDMap = idmap +} + +// SetDefaultGIDMap sets the default GID map to use when opening stores. +func (s *storageTransport) SetDefaultGIDMap(idmap []idtools.IDMap) { + s.defaultGIDMap = idmap +} + +// DefaultUIDMap returns the default UID map used when opening stores. +func (s *storageTransport) DefaultUIDMap() []idtools.IDMap { + return s.defaultUIDMap +} + +// DefaultGIDMap returns the default GID map used when opening stores. +func (s *storageTransport) DefaultGIDMap() []idtools.IDMap { + return s.defaultGIDMap +} + +// ParseStoreReference takes a name or an ID, tries to figure out which it is +// relative to the given store, and returns it in a reference object. +func (s storageTransport) ParseStoreReference(store storage.Store, ref string) (*storageReference, error) { + if ref == "" { + return nil, errors.Wrapf(ErrInvalidReference, "%q is an empty reference", ref) + } + if ref[0] == '[' { + // Ignore the store specifier. + closeIndex := strings.IndexRune(ref, ']') + if closeIndex < 1 { + return nil, errors.Wrapf(ErrInvalidReference, "store specifier in %q did not end", ref) + } + ref = ref[closeIndex+1:] + } + + // The reference may end with an image ID. Image IDs and digests use the same "@" separator; + // here we only peel away an image ID, and leave digests alone. + split := strings.LastIndex(ref, "@") + id := "" + if split != -1 { + possibleID := ref[split+1:] + if possibleID == "" { + return nil, errors.Wrapf(ErrInvalidReference, "empty trailing digest or ID in %q", ref) + } + // If it looks like a digest, leave it alone for now. + if _, err := digest.Parse(possibleID); err != nil { + // Otherwise… + if idSum, err := digest.Parse("sha256:" + possibleID); err == nil && idSum.Validate() == nil { + id = possibleID // … it is a full ID + } else if img, err := store.Image(possibleID); err == nil && img != nil && len(possibleID) >= minimumTruncatedIDLength && strings.HasPrefix(img.ID, possibleID) { + // … it is a truncated version of the ID of an image that's present in local storage, + // so we might as well use the expanded value. + id = img.ID + } else { + return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like an image ID or digest", possibleID) + } + // We have recognized an image ID; peel it off. + ref = ref[:split] + } + } + + // If we only have one @-delimited portion, then _maybe_ it's a truncated image ID. Only check on that if it's + // at least of what we guess is a reasonable minimum length, because we don't want a really short value + // like "a" matching an image by ID prefix when the input was actually meant to specify an image name. 
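Putting the branches of ParseStoreReference together, some inputs it accepts and how they resolve (IDs illustrative; the truncated form only resolves when a local image's ID has that prefix):

    busybox                     -> docker.io/library/busybox:latest (normalized; TagNameOnly defaults the tag)
    busybox:1.30@<64-hex-id>    -> name docker.io/library/busybox:1.30 plus that image ID
    busybox@sha256:<hex-digest> -> digested name; a digest suffix stays in the name rather than being peeled off as an ID
    0123abc                     -> expanded to the ID of a local image whose ID starts with that prefix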
+	if id == "" && len(ref) >= minimumTruncatedIDLength && !strings.ContainsAny(ref, "@:") {
+		if img, err := store.Image(ref); err == nil && img != nil && strings.HasPrefix(img.ID, ref) {
+			// It's a truncated version of the ID of an image that's present in local storage;
+			// we need to expand it.
+			id = img.ID
+			ref = ""
+		}
+	}
+
+	var named reference.Named
+	// Unless we have an un-named "ID" or "@ID" reference (where ID might only have been a prefix), which has been
+	// completely parsed above, the initial portion should be a name, possibly with a tag and/or a digest.
+	if ref != "" {
+		var err error
+		named, err = reference.ParseNormalizedNamed(ref)
+		if err != nil {
+			return nil, errors.Wrapf(err, "error parsing named reference %q", ref)
+		}
+		named = reference.TagNameOnly(named)
+	}
+
+	result, err := newReference(storageTransport{store: store, defaultUIDMap: s.defaultUIDMap, defaultGIDMap: s.defaultGIDMap}, named, id)
+	if err != nil {
+		return nil, err
+	}
+	logrus.Debugf("parsed reference into %q", result.StringWithinTransport())
+	return result, nil
+}
+
+func (s *storageTransport) GetStore() (storage.Store, error) {
+	// Return the transport's previously-set store.  If we don't have one
+	// of those, initialize one now.
+	if s.store == nil {
+		options, err := storage.DefaultStoreOptionsAutoDetectUID()
+		if err != nil {
+			return nil, err
+		}
+		options.UIDMap = s.defaultUIDMap
+		options.GIDMap = s.defaultGIDMap
+		store, err := storage.GetStore(options)
+		if err != nil {
+			return nil, err
+		}
+		s.store = store
+	}
+	return s.store, nil
+}
+
+// ParseReference takes a name and a tag or digest and/or ID
+// ("_name_"/"@_id_"/"_name_:_tag_"/"_name_:_tag_@_id_"/"_name_@_digest_"/"_name_@_digest_@_id_"/"_name_:_tag_@_digest_"/"_name_:_tag_@_digest_@_id_"),
+// possibly prefixed with a store specifier in the form "[_graphroot_]" or
+// "[_driver_@_graphroot_]" or "[_driver_@_graphroot_+_runroot_]" or
+// "[_driver_@_graphroot_:_options_]" or "[_driver_@_graphroot_+_runroot_:_options_]",
+// tries to figure out which it is, and returns it in a reference object.
+// If _id_ is the ID of an image that's present in local storage, it can be truncated, and
+// even be specified as if it were a _name_ value.
+func (s *storageTransport) ParseReference(reference string) (types.ImageReference, error) {
+	var store storage.Store
+	// Check if there's a store location prefix.  If there is, then it
+	// needs to match a store that was previously initialized using
+	// storage.GetStore(), or be enough to let the storage library fill out
+	// the rest using knowledge that it has from elsewhere.
+	if reference[0] == '[' {
+		closeIndex := strings.IndexRune(reference, ']')
+		if closeIndex < 1 {
+			return nil, ErrInvalidReference
+		}
+		storeSpec := reference[1:closeIndex]
+		reference = reference[closeIndex+1:]
+		// Peel off a "driver@" from the start.
+		driverInfo := ""
+		driverSplit := strings.SplitN(storeSpec, "@", 2)
+		if len(driverSplit) != 2 {
+			if storeSpec == "" {
+				return nil, ErrInvalidReference
+			}
+		} else {
+			driverInfo = driverSplit[0]
+			if driverInfo == "" {
+				return nil, ErrInvalidReference
+			}
+			storeSpec = driverSplit[1]
+			if storeSpec == "" {
+				return nil, ErrInvalidReference
+			}
+		}
+		// Peel off a ":options" from the end.
+		var options []string
+		optionsSplit := strings.SplitN(storeSpec, ":", 2)
+		if len(optionsSplit) == 2 {
+			options = strings.Split(optionsSplit[1], ",")
+			storeSpec = optionsSplit[0]
+		}
+		// Peel off a "+runroot" from the new end.
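Tracing the store-specifier peeling above and below on a fully loaded example:

    [overlay@/var/lib/containers/storage+/run/containers/storage:o1=a,o2=b]busybox:latest

    driver     = overlay                        ("driver@" peeled from the start)
    options    = [o1=a o2=b]                    (":options" peeled from the end)
    run root   = /run/containers/storage        ("+runroot" peeled next)
    graph root = /var/lib/containers/storage    (what remains)

The trailing "busybox:latest" is then handed to ParseStoreReference.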
+		runRootInfo := ""
+		runRootSplit := strings.SplitN(storeSpec, "+", 2)
+		if len(runRootSplit) == 2 {
+			runRootInfo = runRootSplit[1]
+			storeSpec = runRootSplit[0]
+		}
+		// The rest is our graph root.
+		rootInfo := storeSpec
+		// Check that any paths are absolute paths.
+		if rootInfo != "" && !filepath.IsAbs(rootInfo) {
+			return nil, ErrPathNotAbsolute
+		}
+		if runRootInfo != "" && !filepath.IsAbs(runRootInfo) {
+			return nil, ErrPathNotAbsolute
+		}
+		store2, err := storage.GetStore(storage.StoreOptions{
+			GraphDriverName:    driverInfo,
+			GraphRoot:          rootInfo,
+			RunRoot:            runRootInfo,
+			GraphDriverOptions: options,
+			UIDMap:             s.defaultUIDMap,
+			GIDMap:             s.defaultGIDMap,
+		})
+		if err != nil {
+			return nil, err
+		}
+		store = store2
+	} else {
+		// We didn't have a store spec, so use the default.
+		store2, err := s.GetStore()
+		if err != nil {
+			return nil, err
+		}
+		store = store2
+	}
+	return s.ParseStoreReference(store, reference)
+}
+
+func (s storageTransport) GetStoreImage(store storage.Store, ref types.ImageReference) (*storage.Image, error) {
+	dref := ref.DockerReference()
+	if dref != nil {
+		if img, err := store.Image(dref.String()); err == nil {
+			return img, nil
+		}
+	}
+	if sref, ok := ref.(*storageReference); ok {
+		tmpRef := *sref
+		if img, err := tmpRef.resolveImage(); err == nil {
+			return img, nil
+		}
+	}
+	return nil, storage.ErrImageUnknown
+}
+
+func (s *storageTransport) GetImage(ref types.ImageReference) (*storage.Image, error) {
+	store, err := s.GetStore()
+	if err != nil {
+		return nil, err
+	}
+	return s.GetStoreImage(store, ref)
+}
+
+func (s storageTransport) ValidatePolicyConfigurationScope(scope string) error {
+	// Check that there's a store location prefix. Values we're passed are
+	// expected to come from PolicyConfigurationIdentity or
+	// PolicyConfigurationNamespaces, so if there's no store location,
+	// something's wrong.
+	if scope[0] != '[' {
+		return ErrInvalidReference
+	}
+	// Parse the store location prefix.
+	closeIndex := strings.IndexRune(scope, ']')
+	if closeIndex < 1 {
+		return ErrInvalidReference
+	}
+	storeSpec := scope[1:closeIndex]
+	scope = scope[closeIndex+1:]
+	storeInfo := strings.SplitN(storeSpec, "@", 2)
+	if len(storeInfo) == 1 && storeInfo[0] != "" {
+		// One component: the graph root.
+		if !filepath.IsAbs(storeInfo[0]) {
+			return ErrPathNotAbsolute
+		}
+	} else if len(storeInfo) == 2 && storeInfo[0] != "" && storeInfo[1] != "" {
+		// Two components: the driver type and the graph root.
+		if !filepath.IsAbs(storeInfo[1]) {
+			return ErrPathNotAbsolute
+		}
+	} else {
+		// Anything else: scope specified in a form we don't
+		// recognize.
+		return ErrInvalidReference
+	}
+	// That might be all of it, and that's okay.
+	if scope == "" {
+		return nil
+	}
+
+	fields := strings.SplitN(scope, "@", 3)
+	switch len(fields) {
+	case 1: // name only
+	case 2: // name:tag@ID or name[:tag]@digest
+		if _, idErr := digest.Parse("sha256:" + fields[1]); idErr != nil {
+			if _, digestErr := digest.Parse(fields[1]); digestErr != nil {
+				return fmt.Errorf("%v is neither a valid digest(%s) nor a valid ID(%s)", fields[1], digestErr.Error(), idErr.Error())
+			}
+		}
+	case 3: // name[:tag]@digest@ID
+		if _, err := digest.Parse(fields[1]); err != nil {
+			return err
+		}
+		if _, err := digest.Parse("sha256:" + fields[2]); err != nil {
+			return err
+		}
+	default: // Coverage: This should never happen
+		return errors.New("Internal error: unexpected number of fields from strings.SplitN")
+	}
+	// As for fields[0], if it is non-empty at all:
+	// FIXME?
We could be verifying the various character set and length restrictions
+	// from docker/distribution/reference.regexp.go, but other than that there
+	// are few semantically invalid strings.
+	return nil
+}
diff --git a/vendor/github.com/containers/image/tarball/doc.go b/vendor/github.com/containers/image/tarball/doc.go
new file mode 100644
index 0000000..a6ced5a
--- /dev/null
+++ b/vendor/github.com/containers/image/tarball/doc.go
@@ -0,0 +1,58 @@
+// Package tarball provides a way to generate images using one or more layer
+// tarballs and an optional template configuration.
+//
+// An example:
+// package main
+//
+// import (
+//	"context"
+//
+//	cp "github.com/containers/image/copy"
+//	"github.com/containers/image/signature"
+//	"github.com/containers/image/tarball"
+//	"github.com/containers/image/transports/alltransports"
+//
+//	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+// )
+//
+// func imageFromTarball() {
+//	src, err := alltransports.ParseImageName("tarball:/var/cache/mock/fedora-26-x86_64/root_cache/cache.tar.gz")
+//	// - or -
+//	// src, err := tarball.Transport.ParseReference("/var/cache/mock/fedora-26-x86_64/root_cache/cache.tar.gz")
+//	if err != nil {
+//		panic(err)
+//	}
+//	updater, ok := src.(tarball.ConfigUpdater)
+//	if !ok {
+//		panic("unexpected: a tarball reference should implement tarball.ConfigUpdater")
+//	}
+//	config := imgspecv1.Image{
+//		Config: imgspecv1.ImageConfig{
+//			Cmd: []string{"/bin/bash"},
+//		},
+//	}
+//	annotations := make(map[string]string)
+//	annotations[imgspecv1.AnnotationDescription] = "test image built from a mock root cache"
+//	err = updater.ConfigUpdate(config, annotations)
+//	if err != nil {
+//		panic(err)
+//	}
+//	dest, err := alltransports.ParseImageName("docker-daemon:mock:latest")
+//	if err != nil {
+//		panic(err)
+//	}
+//	policy, err := signature.DefaultPolicy(nil)
+//	if err != nil {
+//		panic(err)
+//	}
+//	policyContext, err := signature.NewPolicyContext(policy)
+//	if err != nil {
+//		panic(err)
+//	}
+//	defer policyContext.Destroy()
+//	_, err = cp.Image(context.Background(), policyContext, dest, src, &cp.Options{})
+//	if err != nil {
+//		panic(err)
+//	}
+// }
+package tarball
diff --git a/vendor/github.com/containers/image/tarball/tarball_reference.go b/vendor/github.com/containers/image/tarball/tarball_reference.go
new file mode 100644
index 0000000..fc1230a
--- /dev/null
+++ b/vendor/github.com/containers/image/tarball/tarball_reference.go
@@ -0,0 +1,94 @@
+package tarball
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"strings"
+
+	"github.com/containers/image/docker/reference"
+	"github.com/containers/image/image"
+	"github.com/containers/image/types"
+
+	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// ConfigUpdater is an interface that ImageReferences for "tarball" images also
+// implement. It can be used to set values for a configuration, and to set
+// image annotations which will be present in the images returned by the
+// reference's NewImage() or NewImageSource() methods.
+type ConfigUpdater interface {
+	ConfigUpdate(config imgspecv1.Image, annotations map[string]string) error
+}
+
+type tarballReference struct {
+	transport   types.ImageTransport
+	config      imgspecv1.Image
+	annotations map[string]string
+	filenames   []string
+	stdin       []byte
+}
+
+// ConfigUpdate updates the image's default configuration and adds annotations
+// which will be visible in source images created using this reference.
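+// A minimal usage sketch (editor's addition, mirroring the package example in
+// doc.go): assert a parsed "tarball:" reference to ConfigUpdater and call
+// ConfigUpdate(imgspecv1.Image{...}, map[string]string{...}) before creating
+// an image source from the reference.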
+func (r *tarballReference) ConfigUpdate(config imgspecv1.Image, annotations map[string]string) error { + r.config = config + if r.annotations == nil { + r.annotations = make(map[string]string) + } + for k, v := range annotations { + r.annotations[k] = v + } + return nil +} + +func (r *tarballReference) Transport() types.ImageTransport { + return r.transport +} + +func (r *tarballReference) StringWithinTransport() string { + return strings.Join(r.filenames, ":") +} + +func (r *tarballReference) DockerReference() reference.Named { + return nil +} + +func (r *tarballReference) PolicyConfigurationIdentity() string { + return "" +} + +func (r *tarballReference) PolicyConfigurationNamespaces() []string { + return nil +} + +// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. +// The caller must call .Close() on the returned ImageCloser. +// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, +// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. +// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. +func (r *tarballReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { + src, err := r.NewImageSource(ctx, sys) + if err != nil { + return nil, err + } + img, err := image.FromSource(ctx, sys, src) + if err != nil { + src.Close() + return nil, err + } + return img, nil +} + +func (r *tarballReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { + for _, filename := range r.filenames { + if err := os.Remove(filename); err != nil && !os.IsNotExist(err) { + return fmt.Errorf("error removing %q: %v", filename, err) + } + } + return nil +} + +func (r *tarballReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { + return nil, fmt.Errorf(`"tarball:" locations can only be read from, not written to`) +} diff --git a/vendor/github.com/containers/image/tarball/tarball_src.go b/vendor/github.com/containers/image/tarball/tarball_src.go new file mode 100644 index 0000000..76e3e75 --- /dev/null +++ b/vendor/github.com/containers/image/tarball/tarball_src.go @@ -0,0 +1,268 @@ +package tarball + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "runtime" + "strings" + "time" + + "github.com/containers/image/types" + "github.com/klauspost/pgzip" + digest "github.com/opencontainers/go-digest" + imgspecs "github.com/opencontainers/image-spec/specs-go" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" +) + +type tarballImageSource struct { + reference tarballReference + filenames []string + diffIDs []digest.Digest + diffSizes []int64 + blobIDs []digest.Digest + blobSizes []int64 + blobTypes []string + config []byte + configID digest.Digest + configSize int64 + manifest []byte +} + +func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { + // Gather up the digests, sizes, and date information for all of the files. 
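+	// (Editor's note: each entry in r.filenames becomes one layer; the special
+	// entry "-" refers to the stdin contents buffered by the transport's
+	// ParseReference.)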
+ filenames := []string{} + diffIDs := []digest.Digest{} + diffSizes := []int64{} + blobIDs := []digest.Digest{} + blobSizes := []int64{} + blobTimes := []time.Time{} + blobTypes := []string{} + for _, filename := range r.filenames { + var file *os.File + var err error + var blobSize int64 + var blobTime time.Time + var reader io.Reader + if filename == "-" { + blobSize = int64(len(r.stdin)) + blobTime = time.Now() + reader = bytes.NewReader(r.stdin) + } else { + file, err = os.Open(filename) + if err != nil { + return nil, fmt.Errorf("error opening %q for reading: %v", filename, err) + } + defer file.Close() + reader = file + fileinfo, err := file.Stat() + if err != nil { + return nil, fmt.Errorf("error reading size of %q: %v", filename, err) + } + blobSize = fileinfo.Size() + blobTime = fileinfo.ModTime() + } + + // Default to assuming the layer is compressed. + layerType := imgspecv1.MediaTypeImageLayerGzip + + // Set up to digest the file as it is. + blobIDdigester := digest.Canonical.Digester() + reader = io.TeeReader(reader, blobIDdigester.Hash()) + + // Set up to digest the file after we maybe decompress it. + diffIDdigester := digest.Canonical.Digester() + uncompressed, err := pgzip.NewReader(reader) + if err == nil { + // It is compressed, so the diffID is the digest of the uncompressed version + reader = io.TeeReader(uncompressed, diffIDdigester.Hash()) + } else { + // It is not compressed, so the diffID and the blobID are going to be the same + diffIDdigester = blobIDdigester + layerType = imgspecv1.MediaTypeImageLayer + uncompressed = nil + } + // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). + n, err := io.Copy(ioutil.Discard, reader) + if err != nil { + return nil, fmt.Errorf("error reading %q: %v", filename, err) + } + if uncompressed != nil { + uncompressed.Close() + } + + // Grab our uncompressed and possibly-compressed digests and sizes. + filenames = append(filenames, filename) + diffIDs = append(diffIDs, diffIDdigester.Digest()) + diffSizes = append(diffSizes, n) + blobIDs = append(blobIDs, blobIDdigester.Digest()) + blobSizes = append(blobSizes, blobSize) + blobTimes = append(blobTimes, blobTime) + blobTypes = append(blobTypes, layerType) + } + + // Build the rootfs and history for the configuration blob. + rootfs := imgspecv1.RootFS{ + Type: "layers", + DiffIDs: diffIDs, + } + created := time.Time{} + history := []imgspecv1.History{} + // Pick up the layer comment from the configuration's history list, if one is set. + comment := "imported from tarball" + if len(r.config.History) > 0 && r.config.History[0].Comment != "" { + comment = r.config.History[0].Comment + } + for i := range diffIDs { + createdBy := fmt.Sprintf("/bin/sh -c #(nop) ADD file:%s in %c", diffIDs[i].Hex(), os.PathSeparator) + history = append(history, imgspecv1.History{ + Created: &blobTimes[i], + CreatedBy: createdBy, + Comment: comment, + }) + // Use the mtime of the most recently modified file as the image's creation time. + if created.Before(blobTimes[i]) { + created = blobTimes[i] + } + } + + // Pick up other defaults from the config in the reference. + config := r.config + if config.Created == nil { + config.Created = &created + } + if config.Architecture == "" { + config.Architecture = runtime.GOARCH + } + if config.OS == "" { + config.OS = runtime.GOOS + } + config.RootFS = rootfs + config.History = history + + // Encode and digest the image configuration blob. 
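+	// (Editor's note: the digest of this JSON blob becomes the manifest's
+	// Config descriptor below, and is what GetBlob later matches against.)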
+ configBytes, err := json.Marshal(&config) + if err != nil { + return nil, fmt.Errorf("error generating configuration blob for %q: %v", strings.Join(r.filenames, separator), err) + } + configID := digest.Canonical.FromBytes(configBytes) + configSize := int64(len(configBytes)) + + // Populate a manifest with the configuration blob and the file as the single layer. + layerDescriptors := []imgspecv1.Descriptor{} + for i := range blobIDs { + layerDescriptors = append(layerDescriptors, imgspecv1.Descriptor{ + Digest: blobIDs[i], + Size: blobSizes[i], + MediaType: blobTypes[i], + }) + } + annotations := make(map[string]string) + for k, v := range r.annotations { + annotations[k] = v + } + manifest := imgspecv1.Manifest{ + Versioned: imgspecs.Versioned{ + SchemaVersion: 2, + }, + Config: imgspecv1.Descriptor{ + Digest: configID, + Size: configSize, + MediaType: imgspecv1.MediaTypeImageConfig, + }, + Layers: layerDescriptors, + Annotations: annotations, + } + + // Encode the manifest. + manifestBytes, err := json.Marshal(&manifest) + if err != nil { + return nil, fmt.Errorf("error generating manifest for %q: %v", strings.Join(r.filenames, separator), err) + } + + // Return the image. + src := &tarballImageSource{ + reference: *r, + filenames: filenames, + diffIDs: diffIDs, + diffSizes: diffSizes, + blobIDs: blobIDs, + blobSizes: blobSizes, + blobTypes: blobTypes, + config: configBytes, + configID: configID, + configSize: configSize, + manifest: manifestBytes, + } + + return src, nil +} + +func (is *tarballImageSource) Close() error { + return nil +} + +// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. +func (is *tarballImageSource) HasThreadSafeGetBlob() bool { + return false +} + +// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). +// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. +// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. +func (is *tarballImageSource) GetBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { + // We should only be asked about things in the manifest. Maybe the configuration blob. + if blobinfo.Digest == is.configID { + return ioutil.NopCloser(bytes.NewBuffer(is.config)), is.configSize, nil + } + // Maybe one of the layer blobs. + for i := range is.blobIDs { + if blobinfo.Digest == is.blobIDs[i] { + // We want to read that layer: open the file or memory block and hand it back. + if is.filenames[i] == "-" { + return ioutil.NopCloser(bytes.NewBuffer(is.reference.stdin)), int64(len(is.reference.stdin)), nil + } + reader, err := os.Open(is.filenames[i]) + if err != nil { + return nil, -1, fmt.Errorf("error opening %q: %v", is.filenames[i], err) + } + return reader, is.blobSizes[i], nil + } + } + return nil, -1, fmt.Errorf("no blob with digest %q found", blobinfo.Digest.String()) +} + +// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). +// It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); +// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). 
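+// (Editor's note: for this transport the manifest was synthesized in
+// NewImageSource, so the call is purely in-memory; no remote service is used.)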
+func (is *tarballImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { + if instanceDigest != nil { + return nil, "", fmt.Errorf("manifest lists are not supported by the %q transport", transportName) + } + return is.manifest, imgspecv1.MediaTypeImageManifest, nil +} + +// GetSignatures returns the image's signatures. It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). +func (*tarballImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { + if instanceDigest != nil { + return nil, fmt.Errorf("manifest lists are not supported by the %q transport", transportName) + } + return nil, nil +} + +func (is *tarballImageSource) Reference() types.ImageReference { + return &is.reference +} + +// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified. +func (*tarballImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) { + return nil, nil +} diff --git a/vendor/github.com/containers/image/tarball/tarball_transport.go b/vendor/github.com/containers/image/tarball/tarball_transport.go new file mode 100644 index 0000000..72558b5 --- /dev/null +++ b/vendor/github.com/containers/image/tarball/tarball_transport.go @@ -0,0 +1,66 @@ +package tarball + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + "strings" + + "github.com/containers/image/transports" + "github.com/containers/image/types" +) + +const ( + transportName = "tarball" + separator = ":" +) + +var ( + // Transport implements the types.ImageTransport interface for "tarball:" images, + // which are makeshift images constructed using one or more possibly-compressed tar + // archives. + Transport = &tarballTransport{} +) + +type tarballTransport struct { +} + +func (t *tarballTransport) Name() string { + return transportName +} + +func (t *tarballTransport) ParseReference(reference string) (types.ImageReference, error) { + var stdin []byte + var err error + filenames := strings.Split(reference, separator) + for _, filename := range filenames { + if filename == "-" { + stdin, err = ioutil.ReadAll(os.Stdin) + if err != nil { + return nil, fmt.Errorf("error buffering stdin: %v", err) + } + continue + } + f, err := os.Open(filename) + if err != nil { + return nil, fmt.Errorf("error opening %q: %v", filename, err) + } + f.Close() + } + ref := &tarballReference{ + transport: t, + filenames: filenames, + stdin: stdin, + } + return ref, nil +} + +func (t *tarballTransport) ValidatePolicyConfigurationScope(scope string) error { + // See the explanation in daemonReference.PolicyConfigurationIdentity. 
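+	// (Editor's note: tarballReference.PolicyConfigurationIdentity() returns "",
+	// so only the default "" scope can ever apply to this transport.)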
+	return errors.New(`tarball: does not support any scopes except the default "" one`)
+}
+
+func init() {
+	transports.Register(Transport)
+}
diff --git a/vendor/github.com/containers/image/transports/alltransports/alltransports.go b/vendor/github.com/containers/image/transports/alltransports/alltransports.go
new file mode 100644
index 0000000..2335c56
--- /dev/null
+++ b/vendor/github.com/containers/image/transports/alltransports/alltransports.go
@@ -0,0 +1,46 @@
+package alltransports
+
+import (
+	"strings"
+
+	// register all known transports
+	// NOTE: Make sure docs/policy.json.md is updated when adding or updating
+	// a transport.
+	_ "github.com/containers/image/directory"
+	_ "github.com/containers/image/docker"
+	_ "github.com/containers/image/docker/archive"
+	_ "github.com/containers/image/oci/archive"
+	_ "github.com/containers/image/oci/layout"
+	_ "github.com/containers/image/openshift"
+	_ "github.com/containers/image/tarball"
+	// The ostree transport is registered by ostree*.go
+	// The storage transport is registered by storage*.go
+	"github.com/containers/image/transports"
+	"github.com/containers/image/types"
+	"github.com/pkg/errors"
+)
+
+// ParseImageName converts a URL-like image name to a types.ImageReference.
+func ParseImageName(imgName string) (types.ImageReference, error) {
+	// Keep this in sync with TransportFromImageName!
+	parts := strings.SplitN(imgName, ":", 2)
+	if len(parts) != 2 {
+		return nil, errors.Errorf(`Invalid image name "%s", expected colon-separated transport:reference`, imgName)
+	}
+	transport := transports.Get(parts[0])
+	if transport == nil {
+		return nil, errors.Errorf(`Invalid image name "%s", unknown transport "%s"`, imgName, parts[0])
+	}
+	return transport.ParseReference(parts[1])
+}
+
+// TransportFromImageName converts a URL-like name to a types.ImageTransport or nil when
+// the transport is unknown or when the input is invalid.
+func TransportFromImageName(imageName string) types.ImageTransport {
+	// Keep this in sync with ParseImageName!
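+	// e.g. (editor's illustration): "docker://busybox" yields the registered
+	// "docker" transport, while an input with no colon at all yields nil.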
+ parts := strings.SplitN(imageName, ":", 2) + if len(parts) == 2 { + return transports.Get(parts[0]) + } + return nil +} diff --git a/vendor/github.com/containers/image/transports/alltransports/docker_daemon.go b/vendor/github.com/containers/image/transports/alltransports/docker_daemon.go new file mode 100644 index 0000000..6d2ba4b --- /dev/null +++ b/vendor/github.com/containers/image/transports/alltransports/docker_daemon.go @@ -0,0 +1,8 @@ +// +build !containers_image_docker_daemon_stub + +package alltransports + +import ( + // Register the docker-daemon transport + _ "github.com/containers/image/docker/daemon" +) diff --git a/vendor/github.com/containers/image/transports/alltransports/docker_daemon_stub.go b/vendor/github.com/containers/image/transports/alltransports/docker_daemon_stub.go new file mode 100644 index 0000000..27f3850 --- /dev/null +++ b/vendor/github.com/containers/image/transports/alltransports/docker_daemon_stub.go @@ -0,0 +1,9 @@ +// +build containers_image_docker_daemon_stub + +package alltransports + +import "github.com/containers/image/transports" + +func init() { + transports.Register(transports.NewStubTransport("docker-daemon")) +} diff --git a/vendor/github.com/containers/image/transports/alltransports/ostree.go b/vendor/github.com/containers/image/transports/alltransports/ostree.go new file mode 100644 index 0000000..4a3b29a --- /dev/null +++ b/vendor/github.com/containers/image/transports/alltransports/ostree.go @@ -0,0 +1,8 @@ +// +build !containers_image_ostree_stub,linux + +package alltransports + +import ( + // Register the ostree transport + _ "github.com/containers/image/ostree" +) diff --git a/vendor/github.com/containers/image/transports/alltransports/ostree_stub.go b/vendor/github.com/containers/image/transports/alltransports/ostree_stub.go new file mode 100644 index 0000000..48fcaa5 --- /dev/null +++ b/vendor/github.com/containers/image/transports/alltransports/ostree_stub.go @@ -0,0 +1,9 @@ +// +build containers_image_ostree_stub !linux + +package alltransports + +import "github.com/containers/image/transports" + +func init() { + transports.Register(transports.NewStubTransport("ostree")) +} diff --git a/vendor/github.com/containers/image/transports/alltransports/storage.go b/vendor/github.com/containers/image/transports/alltransports/storage.go new file mode 100644 index 0000000..a867c66 --- /dev/null +++ b/vendor/github.com/containers/image/transports/alltransports/storage.go @@ -0,0 +1,8 @@ +// +build !containers_image_storage_stub + +package alltransports + +import ( + // Register the storage transport + _ "github.com/containers/image/storage" +) diff --git a/vendor/github.com/containers/image/transports/alltransports/storage_stub.go b/vendor/github.com/containers/image/transports/alltransports/storage_stub.go new file mode 100644 index 0000000..4ac684e --- /dev/null +++ b/vendor/github.com/containers/image/transports/alltransports/storage_stub.go @@ -0,0 +1,9 @@ +// +build containers_image_storage_stub + +package alltransports + +import "github.com/containers/image/transports" + +func init() { + transports.Register(transports.NewStubTransport("containers-storage")) +} diff --git a/vendor/github.com/containers/image/transports/stub.go b/vendor/github.com/containers/image/transports/stub.go new file mode 100644 index 0000000..087f69b --- /dev/null +++ b/vendor/github.com/containers/image/transports/stub.go @@ -0,0 +1,36 @@ +package transports + +import ( + "fmt" + + "github.com/containers/image/types" +) + +// stubTransport is an implementation 
of types.ImageTransport which has a name, but rejects any references with “the transport $name: is not supported in this build”.
+type stubTransport string
+
+// NewStubTransport returns an implementation of types.ImageTransport which has a name, but rejects any references with “the transport $name: is not supported in this build”.
+func NewStubTransport(name string) types.ImageTransport {
+	return stubTransport(name)
+}
+
+// Name returns the name of the transport, which must be unique among other transports.
+func (s stubTransport) Name() string {
+	return string(s)
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
+func (s stubTransport) ParseReference(reference string) (types.ImageReference, error) {
+	return nil, fmt.Errorf(`The transport "%s:" is not supported in this build`, string(s))
+}
+
+// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys
+// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
+// It is acceptable to allow an invalid value which will never be matched; it can "only" cause user confusion.
+// scope passed to this function will not be "", that value is always allowed.
+func (s stubTransport) ValidatePolicyConfigurationScope(scope string) error {
+	// Allowing any reference in here allows tools with some transports stubbed-out to still
+	// use signature verification policies which refer to these stubbed-out transports.
+	// See also the treatment of unknown transports in policyTransportScopesWithTransport.UnmarshalJSON.
+	return nil
+}
diff --git a/vendor/github.com/containers/image/transports/transports.go b/vendor/github.com/containers/image/transports/transports.go
new file mode 100644
index 0000000..687d0a4
--- /dev/null
+++ b/vendor/github.com/containers/image/transports/transports.go
@@ -0,0 +1,90 @@
+package transports
+
+import (
+	"fmt"
+	"sort"
+	"sync"
+
+	"github.com/containers/image/types"
+)
+
+// knownTransports is a registry of known ImageTransport instances.
+type knownTransports struct {
+	transports map[string]types.ImageTransport
+	mu         sync.Mutex
+}
+
+func (kt *knownTransports) Get(k string) types.ImageTransport {
+	kt.mu.Lock()
+	t := kt.transports[k]
+	kt.mu.Unlock()
+	return t
+}
+
+func (kt *knownTransports) Remove(k string) {
+	kt.mu.Lock()
+	delete(kt.transports, k)
+	kt.mu.Unlock()
+}
+
+func (kt *knownTransports) Add(t types.ImageTransport) {
+	kt.mu.Lock()
+	defer kt.mu.Unlock()
+	name := t.Name()
+	if t := kt.transports[name]; t != nil {
+		panic(fmt.Sprintf("Duplicate image transport name %s", name))
+	}
+	kt.transports[name] = t
+}
+
+var kt *knownTransports
+
+func init() {
+	kt = &knownTransports{
+		transports: make(map[string]types.ImageTransport),
+	}
+}
+
+// Get returns the transport specified by name or nil when unavailable.
+func Get(name string) types.ImageTransport {
+	return kt.Get(name)
+}
+
+// Delete deletes a transport from the registered transports.
+func Delete(name string) {
+	kt.Remove(name)
+}
+
+// Register registers a transport.
+func Register(t types.ImageTransport) {
+	kt.Add(t)
+}
+
+// ImageName converts a types.ImageReference into a URL-like image name, which MUST be such that
+// ParseImageName(ImageName(reference)) returns an equivalent reference.
+//
+// This is the generally recommended way to refer to images in the UI.
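+// For example (editor's illustration, assuming the "docker" transport is
+// registered): a reference parsed from "docker://busybox" round-trips as
+// "docker://docker.io/library/busybox:latest", with defaults filled in.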
+//
+// NOTE: The returned string is not promised to be equal to the original input to ParseImageName;
+// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
+func ImageName(ref types.ImageReference) string {
+	return ref.Transport().Name() + ":" + ref.StringWithinTransport()
+}
+
+// ListNames returns a list of non-deprecated transport names.
+// Deprecated transports can be used, but are not presented to users.
+func ListNames() []string {
+	kt.mu.Lock()
+	defer kt.mu.Unlock()
+	deprecated := map[string]bool{
+		"atomic": true,
+	}
+	var names []string
+	for _, transport := range kt.transports {
+		if !deprecated[transport.Name()] {
+			names = append(names, transport.Name())
+		}
+	}
+	sort.Strings(names)
+	return names
+}
diff --git a/vendor/github.com/containers/image/types/types.go b/vendor/github.com/containers/image/types/types.go
new file mode 100644
index 0000000..7895043
--- /dev/null
+++ b/vendor/github.com/containers/image/types/types.go
@@ -0,0 +1,520 @@
+package types
+
+import (
+	"context"
+	"io"
+	"time"
+
+	"github.com/containers/image/docker/reference"
+	"github.com/opencontainers/go-digest"
+	"github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// ImageTransport is a top-level namespace for ways to store/load an image.
+// It should generally correspond to ImageSource/ImageDestination implementations.
+//
+// Note that ImageTransport is based on "ways the users refer to image storage", not necessarily on the underlying physical transport.
+// For example, all Docker References would be used within a single "docker" transport, regardless of whether the images are pulled over HTTP or HTTPS
+// (or, even, IPv4 or IPv6).
+//
+// OTOH all images using the same transport should (apart from versions of the image format) be interoperable.
+// For example, several different ImageTransport implementations may be based on local filesystem paths,
+// but using completely different formats for the contents of that path (a single tar file, a directory containing tarballs, a fully expanded container filesystem, ...)
+//
+// See also transports.KnownTransports.
+type ImageTransport interface {
+	// Name returns the name of the transport, which must be unique among other transports.
+	Name() string
+	// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
+	ParseReference(reference string) (ImageReference, error)
+	// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys
+	// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
+	// It is acceptable to allow an invalid value which will never be matched; it can "only" cause user confusion.
+	// scope passed to this function will not be "", that value is always allowed.
+	ValidatePolicyConfigurationScope(scope string) error
+}
+
+// ImageReference is an abstracted way to refer to an image location, namespaced within an ImageTransport.
+//
+// The object should preferably be immutable after creation, with any parsing/state-dependent resolving happening
+// within an ImageTransport.ParseReference() or equivalent API creating the reference object.
+// That's also why the various identification/formatting methods of this type do not support returning errors.
+// +// WARNING: While this design freezes the content of the reference within this process, it can not freeze the outside +// world: paths may be replaced by symlinks elsewhere, HTTP APIs may start returning different results, and so on. +type ImageReference interface { + Transport() ImageTransport + // StringWithinTransport returns a string representation of the reference, which MUST be such that + // reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. + // NOTE: The returned string is not promised to be equal to the original input to ParseReference; + // e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. + // WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix; + // instead, see transports.ImageName(). + StringWithinTransport() string + + // DockerReference returns a Docker reference associated with this reference + // (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, + // not e.g. after redirect or alias processing), or nil if unknown/not applicable. + DockerReference() reference.Named + + // PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. + // This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases; + // The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical + // (i.e. various references with exactly the same semantics should return the same configuration identity) + // It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but + // not required/guaranteed that it will be a valid input to Transport().ParseReference(). + // Returns "" if configuration identities for these references are not supported. + PolicyConfigurationIdentity() string + + // PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search + // for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed + // in order, terminating on first match, and an implicit "" is always checked at the end. + // It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), + // and each following element to be a prefix of the element preceding it. + PolicyConfigurationNamespaces() []string + + // NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. + // The caller must call .Close() on the returned ImageCloser. + // NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, + // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. + // WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. + NewImage(ctx context.Context, sys *SystemContext) (ImageCloser, error) + // NewImageSource returns a types.ImageSource for this reference. + // The caller must call .Close() on the returned ImageSource. + NewImageSource(ctx context.Context, sys *SystemContext) (ImageSource, error) + // NewImageDestination returns a types.ImageDestination for this reference. + // The caller must call .Close() on the returned ImageDestination. 
+	NewImageDestination(ctx context.Context, sys *SystemContext) (ImageDestination, error)
+
+	// DeleteImage deletes the named image from the registry, if supported.
+	DeleteImage(ctx context.Context, sys *SystemContext) error
+}
+
+// BlobInfo collects known information about a blob (layer/config).
+// In some situations, some fields may be unknown, in others they may be mandatory; documenting an “unknown” value here does not override that.
+type BlobInfo struct {
+	Digest      digest.Digest // "" if unknown.
+	Size        int64         // -1 if unknown
+	URLs        []string
+	Annotations map[string]string
+	MediaType   string
+}
+
+// BICTransportScope encapsulates transport-dependent representation of a “scope” where blobs are or are not present.
+// BlobInfoCache.RecordKnownLocation / BlobInfoCache.CandidateLocations record data about blobs keyed by (scope, digest).
+// The scope will typically be similar to an ImageReference, or a superset of it within which blobs are reusable.
+//
+// NOTE: The contents of this structure may be recorded in a persistent file, possibly shared across different
+// tools which use different versions of the transport. Allow for reasonable backward/forward compatibility,
+// at least by not failing hard when encountering unknown data.
+type BICTransportScope struct {
+	Opaque string
+}
+
+// BICLocationReference encapsulates transport-dependent representation of a blob location within a BICTransportScope.
+// Each transport can store arbitrary data using BlobInfoCache.RecordKnownLocation, and ImageDestination.TryReusingBlob
+// can look it up using BlobInfoCache.CandidateLocations.
+//
+// NOTE: The contents of this structure may be recorded in a persistent file, possibly shared across different
+// tools which use different versions of the transport. Allow for reasonable backward/forward compatibility,
+// at least by not failing hard when encountering unknown data.
+type BICLocationReference struct {
+	Opaque string
+}
+
+// BICReplacementCandidate is an item returned by BlobInfoCache.CandidateLocations.
+type BICReplacementCandidate struct {
+	Digest   digest.Digest
+	Location BICLocationReference
+}
+
+// BlobInfoCache records data useful for reusing blobs, or substituting equivalent ones, to avoid unnecessary blob copies.
+//
+// It records two kinds of data:
+// - Sets of corresponding digest vs. uncompressed digest ("DiffID") pairs:
+//   One of the two digests is known to be uncompressed, and a single uncompressed digest may correspond to more than one compressed digest.
+//   This allows matching compressed layer blobs to existing local uncompressed layers (to avoid unnecessary download and decompression),
+//   or uncompressed layer blobs to existing remote compressed layers (to avoid unnecessary compression and upload).
+//
+//   It is allowed to record an (uncompressed digest, the same uncompressed digest) correspondence, to express that the digest is known
+//   to be uncompressed (i.e. that a conversion from schema1 does not have to decompress the blob to compute a DiffID value).
+//
+//   This mapping is primarily maintained in generic copy.Image code, but transports may want to contribute more data points if they independently
+//   compress/decompress blobs for their own purposes.
+//
+// - Known blob locations, managed by individual transports:
+//   The transports call RecordKnownLocation when encountering a blob that could possibly be reused (typically in GetBlob/PutBlob/TryReusingBlob),
+//   recording transport-specific information that allows the transport to reuse the blob in the future;
+//   then, TryReusingBlob implementations can call CandidateLocations to look up previously recorded blob locations that could be reused.
+//
+//   Each transport defines its own “scopes” within which blob reuse is possible (e.g., in the docker/distribution case, blobs
+//   can be directly reused within a registry, or mounted across registries within a registry server.)
+//
+// None of the methods return an error indication: errors when neither reading from, nor writing to, the cache, should be fatal;
+// users of the cache should just fall back to copying the blobs the usual way.
+type BlobInfoCache interface {
+	// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
+	// May return anyDigest if it is known to be uncompressed.
+	// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
+	UncompressedDigest(anyDigest digest.Digest) digest.Digest
+	// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed.
+	// It’s allowed for anyDigest == uncompressed.
+	// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
+	// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
+	// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
+	RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest)
+
+	// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
+	// and can be reused given the opaque location data.
+	RecordKnownLocation(transport ImageTransport, scope BICTransportScope, digest digest.Digest, location BICLocationReference)
+	// CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused
+	// within the specified (transport, scope) (if they still exist, which is not guaranteed).
+	//
+	// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
+	// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
+	// uncompressed digest.
+	CandidateLocations(transport ImageTransport, scope BICTransportScope, digest digest.Digest, canSubstitute bool) []BICReplacementCandidate
+}
+
+// ImageSource is a service, possibly remote (= slow), to download components of a single image or a named image set (manifest list).
+// This is primarily useful for copying images around; for examining their properties, Image (below)
+// is usually more useful.
+// Each ImageSource should eventually be closed by calling Close().
+//
+// WARNING: Various methods which return an object identified by digest generally do not
+// validate that the returned data actually matches that digest; this is the caller’s responsibility.
+type ImageSource interface {
+	// Reference returns the reference used to set up this source, _as specified by the user_
+	// (not as the image itself, or its underlying storage, claims). This can be used e.g.
to determine which public keys are trusted for this image. + Reference() ImageReference + // Close removes resources associated with an initialized ImageSource, if any. + Close() error + // GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). + // It may use a remote (= slow) service. + // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); + // this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). + GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) + // GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). + // The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. + // May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. + GetBlob(context.Context, BlobInfo, BlobInfoCache) (io.ReadCloser, int64, error) + // HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. + HasThreadSafeGetBlob() bool + // GetSignatures returns the image's signatures. It may use a remote (= slow) service. + // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for + // (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list + // (e.g. if the source never returns manifest lists). + GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) + // LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer blobsums that are listed in the image's manifest. + // The Digest field is guaranteed to be provided; Size may be -1. + // WARNING: The list may contain duplicates, and they are semantically relevant. + LayerInfosForCopy(ctx context.Context) ([]BlobInfo, error) +} + +// LayerCompression indicates if layers must be compressed, decompressed or preserved +type LayerCompression int + +const ( + // PreserveOriginal indicates the layer must be preserved, ie + // no compression or decompression. + PreserveOriginal LayerCompression = iota + // Decompress indicates the layer must be decompressed + Decompress + // Compress indicates the layer must be compressed + Compress +) + +// ImageDestination is a service, possibly remote (= slow), to store components of a single image. +// +// There is a specific required order for some of the calls: +// TryReusingBlob/PutBlob on the various blobs, if any, MUST be called before PutManifest (manifest references blobs, which may be created or compressed only at push time) +// PutSignatures, if called, MUST be called after PutManifest (signatures reference manifest contents) +// Finally, Commit MUST be called if the caller wants the image, as formed by the components saved above, to persist. +// +// Each ImageDestination should eventually be closed by calling Close(). +type ImageDestination interface { + // Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, + // e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects. 
+	Reference() ImageReference
+	// Close removes resources associated with an initialized ImageDestination, if any.
+	Close() error
+
+	// SupportedManifestMIMETypes tells which manifest MIME types the destination supports.
+	// If an empty slice or nil is returned, then any MIME type can be tried for upload.
+	SupportedManifestMIMETypes() []string
+	// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
+	// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
+	SupportsSignatures(ctx context.Context) error
+	// DesiredLayerCompression indicates the kind of compression to apply on layers
+	DesiredLayerCompression() LayerCompression
+	// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
+	// uploaded to the image destination, true otherwise.
+	AcceptsForeignLayerURLs() bool
+	// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise.
+	MustMatchRuntimeOS() bool
+	// IgnoresEmbeddedDockerReference() returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
+	// and would prefer to receive an unmodified manifest instead of one modified for the destination.
+	// Does not make a difference if Reference().DockerReference() is nil.
+	IgnoresEmbeddedDockerReference() bool
+
+	// PutBlob writes contents of stream and returns data representing the result.
+	// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
+	// inputInfo.Size is the expected length of stream, if known.
+	// inputInfo.MediaType describes the blob format, if known.
+	// May update cache.
+	// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+	// to any other readers for download using the supplied digest.
+	// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+	PutBlob(ctx context.Context, stream io.Reader, inputInfo BlobInfo, cache BlobInfoCache, isConfig bool) (BlobInfo, error)
+	// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
+	HasThreadSafePutBlob() bool
+	// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+	// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+	// info.Digest must not be empty.
+	// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+	// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+	// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+	// May use and/or update cache.
+	TryReusingBlob(ctx context.Context, info BlobInfo, cache BlobInfoCache, canSubstitute bool) (bool, BlobInfo, error)
+	// PutManifest writes manifest to the destination.
+	// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
+	// If the destination is in principle available, refuses this manifest type (e.g.
it does not recognize the schema),
+	// but may accept a different manifest type, the returned error must be a ManifestTypeRejectedError.
+	PutManifest(ctx context.Context, manifest []byte) error
+	PutSignatures(ctx context.Context, signatures [][]byte) error
+	// Commit marks the process of storing the image as successful and asks for the image to be persisted.
+	// WARNING: This does not have any transactional semantics:
+	// - Uploaded data MAY be visible to others before Commit() is called
+	// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
+	Commit(ctx context.Context) error
+}
+
+// ManifestTypeRejectedError is returned by ImageDestination.PutManifest if the destination is in principle available,
+// refuses specifically this manifest type, but may accept a different manifest type.
+type ManifestTypeRejectedError struct { // We only use a struct to allow a type assertion, without limiting the contents of the error otherwise.
+	Err error
+}
+
+func (e ManifestTypeRejectedError) Error() string {
+	return e.Err.Error()
+}
+
+// UnparsedImage is an Image-to-be; until it is verified and accepted, it only carries its identity and caches manifest and signature blobs.
+// Thus, an UnparsedImage can be created from an ImageSource simply by fetching blobs without interpreting them,
+// allowing cryptographic signature verification to happen first, before even fetching the manifest, or parsing anything else.
+// This also makes the UnparsedImage→Image conversion an explicitly visible step.
+//
+// An UnparsedImage is a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance.
+//
+// The UnparsedImage must not be used after the underlying ImageSource is Close()d.
+type UnparsedImage interface {
+	// Reference returns the reference used to set up this source, _as specified by the user_
+	// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
+	Reference() ImageReference
+	// Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need.
+	Manifest(ctx context.Context) ([]byte, string, error)
+	// Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need.
+	Signatures(ctx context.Context) ([][]byte, error)
+}
+
+// Image is the primary API for inspecting properties of images.
+// An Image is based on a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance.
+//
+// The Image must not be used after the underlying ImageSource is Close()d.
+type Image interface {
+	// Note that Reference may return nil in the return value of UpdatedImage!
+	UnparsedImage
+	// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
+	// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below.
+	ConfigInfo() BlobInfo
+	// ConfigBlob returns the blob described by ConfigInfo, if ConfigInfo().Digest != ""; nil otherwise.
+	// The result is cached; it is OK to call this however often you need.
+	ConfigBlob(context.Context) ([]byte, error)
+	// OCIConfig returns the image configuration as per OCI v1 image-spec.
Information about
+	// layers in the resulting configuration isn't guaranteed to be returned due to how
+	// old image manifests work (docker v2s1 especially).
+	OCIConfig(context.Context) (*v1.Image, error)
+	// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
+	// The Digest field is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+	// WARNING: The list may contain duplicates, and they are semantically relevant.
+	LayerInfos() []BlobInfo
+	// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer blobsums that are listed in the image's manifest.
+	// The Digest field is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+	// WARNING: The list may contain duplicates, and they are semantically relevant.
+	LayerInfosForCopy(context.Context) ([]BlobInfo, error)
+	// EmbeddedDockerReferenceConflicts reports whether a Docker reference embedded in the manifest, if any, conflicts with destination ref.
+	// It returns false if the manifest does not embed a Docker reference.
+	// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.)
+	EmbeddedDockerReferenceConflicts(ref reference.Named) bool
+	// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
+	Inspect(context.Context) (*ImageInspectInfo, error)
+	// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs.
+	// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive
+	// (most importantly it forces us to download the full layers even if they are already present at the destination).
+	UpdatedImageNeedsLayerDiffIDs(options ManifestUpdateOptions) bool
+	// UpdatedImage returns a types.Image modified according to options.
+	// Everything in options.InformationOnly should be provided, other fields should be set only if a modification is desired.
+	// This does not change the state of the original Image object.
+	UpdatedImage(ctx context.Context, options ManifestUpdateOptions) (Image, error)
+	// Size returns an approximation of the amount of disk space which is consumed by the image in its current
+	// location. If the size is not known, -1 will be returned.
+	Size() (int64, error)
+}
+
+// ImageCloser is an Image with a Close() method which must be called by the user.
+// This is returned by ImageReference.NewImage, which transparently instantiates a types.ImageSource,
+// to ensure that the ImageSource is closed.
+type ImageCloser interface {
+	Image
+	// Close removes resources associated with an initialized ImageCloser.
+	Close() error
+}
+
+// ManifestUpdateOptions is a way to pass named optional arguments to Image.UpdatedImage
+type ManifestUpdateOptions struct {
+	LayerInfos              []BlobInfo // Complete BlobInfos (size+digest+urls+annotations) which should replace the originals, in order (the root layer first, and then successive layered layers). BlobInfos' MediaType fields are ignored.
+	EmbeddedDockerReference reference.Named
+	ManifestMIMEType        string
+	// The values below are NOT requests to modify the image; they provide optional context which may or may not be used.
+	InformationOnly ManifestUpdateInformation
+}
+
+// ManifestUpdateInformation is a component of ManifestUpdateOptions, named here
+// only to make writing struct literals possible.
+type ManifestUpdateInformation struct {
+	Destination  ImageDestination // and yes, UpdatedImage may write to Destination (see the schema2 → schema1 conversion logic in image/docker_schema2.go)
+	LayerInfos   []BlobInfo       // Complete BlobInfos (size+digest) which have been uploaded, in order (the root layer first, and then successive layered layers)
+	LayerDiffIDs []digest.Digest  // Digest values for the _uncompressed_ contents of the blobs which have been uploaded, in the same order.
+}
+
+// ImageInspectInfo is a set of metadata describing Docker images, primarily their manifest and configuration.
+// The Tag field is a legacy field which is here just for the Docker v2s1 manifest. It won't be supported
+// for other manifest types.
+type ImageInspectInfo struct {
+	Tag           string
+	Created       *time.Time
+	DockerVersion string
+	Labels        map[string]string
+	Architecture  string
+	Os            string
+	Layers        []string
+}
+
+// DockerAuthConfig contains authorization information for connecting to a registry.
+// The value of Username and Password can be empty for accessing the registry anonymously.
+type DockerAuthConfig struct {
+	Username string
+	Password string
+}
+
+// OptionalBool is a boolean with an additional undefined value, which is meant
+// to be used in the context of user input to distinguish between a
+// user-specified value and a default value.
+type OptionalBool byte
+
+const (
+	// OptionalBoolUndefined indicates that the OptionalBool hasn't been written.
+	OptionalBoolUndefined OptionalBool = iota
+	// OptionalBoolTrue represents the boolean true.
+	OptionalBoolTrue
+	// OptionalBoolFalse represents the boolean false.
+	OptionalBoolFalse
+)
+
+// NewOptionalBool converts the input bool into either OptionalBoolTrue or
+// OptionalBoolFalse. The function is meant to avoid boilerplate code for users.
+func NewOptionalBool(b bool) OptionalBool {
+	o := OptionalBoolFalse
+	if b {
+		o = OptionalBoolTrue
+	}
+	return o
+}
+
+// SystemContext allows parameterizing access to implicitly-accessed resources,
+// like configuration files in /etc and users' login state in their home directory.
+// Various components can share the same field only if their semantics is exactly
+// the same; if in doubt, add a new field.
+// It is always OK to pass nil instead of a SystemContext.
+type SystemContext struct {
+	// If not "", prefixed to any absolute paths used by default by the library (e.g. in /etc/).
+	// Not used for any of the more specific path overrides available in this struct.
+	// Not used for any paths specified by users in config files (even if the location of the config file _was_ affected by it).
+	// NOTE: If this is set, environment-variable overrides of paths are ignored (to keep the semantics simple: to create an /etc replacement,
+	// just set RootForImplicitAbsolutePaths and there is no need to worry about the environment.)
+	// NOTE: This does NOT affect paths starting by $HOME.
+	RootForImplicitAbsolutePaths string
+
+	// === Global configuration overrides ===
+	// If not "", overrides the system's default path for signature.Policy configuration.
+ SignaturePolicyPath string + // If not "", overrides the system's default path for registries.d (Docker signature storage configuration) + RegistriesDirPath string + // Path to the system-wide registries configuration file + SystemRegistriesConfPath string + // If not "", overrides the default path for the authentication file + AuthFilePath string + // If not "", overrides the use of platform.GOARCH when choosing an image or verifying architecture match. + ArchitectureChoice string + // If not "", overrides the use of platform.GOOS when choosing an image or verifying OS match. + OSChoice string + // If not "", overrides the system's default directory containing a blob info cache. + BlobInfoCacheDir string + + // Additional tags when creating or copying a docker-archive. + DockerArchiveAdditionalTags []reference.NamedTagged + + // === OCI.Transport overrides === + // If not "", a directory containing a CA certificate (ending with ".crt"), + // a client certificate (ending with ".cert") and a client certificate key + // (ending with ".key") used when downloading OCI image layers. + OCICertPath string + // Allow downloading OCI image layers over HTTP, or HTTPS with failed TLS verification. Note that this does not affect other TLS connections. + OCIInsecureSkipTLSVerify bool + // If not "", use a shared directory for storing blobs rather than within OCI layouts + OCISharedBlobDirPath string + // Allow uncompressed image layers for OCI images + OCIAcceptUncompressedLayers bool + + // === docker.Transport overrides === + // If not "", a directory containing a CA certificate (ending with ".crt"), + // a client certificate (ending with ".cert") and a client certificate key + // (ending with ".key") used when talking to a Docker Registry. + DockerCertPath string + // If not "", overrides the system's default path for a directory containing host[:port] subdirectories with the same structure as DockerCertPath above. + // Ignored if DockerCertPath is non-empty. + DockerPerHostCertDirPath string + // Allow contacting docker registries over HTTP, or HTTPS with failed TLS verification. Note that this does not affect other TLS connections. + DockerInsecureSkipTLSVerify OptionalBool + // if nil, the library tries to parse ~/.docker/config.json to retrieve credentials + DockerAuthConfig *DockerAuthConfig + // if not "", a User-Agent header is added to each request when contacting a registry. + DockerRegistryUserAgent string + // if true, a V1 ping attempt isn't done to give users a better error. Default is false. + // Note that this field is used mainly to integrate containers/image into projectatomic/docker + // in order not to break any of docker's existing integration tests. + DockerDisableV1Ping bool + // Directory to use for OSTree temporary files + OSTreeTmpDirPath string + + // === docker/daemon.Transport overrides === + // A directory containing a CA certificate (ending with ".crt"), + // a client certificate (ending with ".cert") and a client certificate key + // (ending with ".key") used when talking to a Docker daemon. + DockerDaemonCertPath string + // The hostname or IP of the Docker daemon. If not set (aka ""), client.DefaultDockerHost is assumed. + DockerDaemonHost string + // Used to skip TLS verification, off by default. To take effect, DockerDaemonCertPath needs to be specified as well.
+ DockerDaemonInsecureSkipTLSVerify bool + + // === dir.Transport overrides === + // DirForceCompress compresses the image layers if set to true + DirForceCompress bool +} + +// ProgressProperties is used to pass information from the copy code to a monitor which +// can use the real-time information to produce output or react to changes. +type ProgressProperties struct { + Artifact BlobInfo + Offset uint64 +} diff --git a/vendor/github.com/containers/image/vendor.conf b/vendor/github.com/containers/image/vendor.conf new file mode 100644 index 0000000..438cab1 --- /dev/null +++ b/vendor/github.com/containers/image/vendor.conf @@ -0,0 +1,51 @@ +github.com/containers/image + +github.com/sirupsen/logrus v1.0.0 +github.com/containers/storage v1.12.2 +github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76 +github.com/docker/docker-credential-helpers d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1 +github.com/docker/distribution 5f6282db7d65e6d72ad7c2cc66310724a57be716 +github.com/docker/docker da99009bbb1165d1ac5688b5c81d2f589d418341 +github.com/docker/go-connections 7beb39f0b969b075d1325fecb092faf27fd357b6 +github.com/docker/go-units 0dadbb0345b35ec7ef35e228dabb8de89a65bf52 +github.com/docker/libtrust aabc10ec26b754e797f9028f4589c5b7bd90dc20 +github.com/containerd/continuity d8fb8589b0e8e85b8c8bbaa8840226d0dfeb7371 +github.com/ghodss/yaml 04f313413ffd65ce25f2541bfd2b2ceec5c0908c +github.com/gorilla/mux 94e7d24fd285520f3d12ae998f7fdd6b5393d453 +github.com/imdario/mergo 50d4dbd4eb0e84778abe37cefef140271d96fade +github.com/mistifyio/go-zfs c0224de804d438efd11ea6e52ada8014537d6062 +github.com/mtrmac/gpgme b2432428689ca58c2b8e8dea9449d3295cf96fc9 +github.com/opencontainers/go-digest c9281466c8b2f606084ac71339773efd177436e7 +github.com/opencontainers/image-spec v1.0.0 +github.com/opencontainers/runc 6b1d0e76f239ffb435445e5ae316d2676c07c6e3 +github.com/pborman/uuid 1b00554d822231195d1babd97ff4a781231955c9 +github.com/pkg/errors 248dadf4e9068a0b3e79f02ed0a610d935de5302 +github.com/pmezard/go-difflib 792786c7400a136282c1664665ae0a8db921c6c2 +github.com/stretchr/testify 4d4bfba8f1d1027c4fdbe371823030df51419987 +github.com/vbatts/tar-split v0.10.2 +golang.org/x/crypto 453249f01cfeb54c3d549ddb75ff152ca243f9d8 +golang.org/x/net 6b27048ae5e6ad1ef927e72e437531493de612fe +golang.org/x/sync 42b317875d0fa942474b76e1b46a6060d720ae6e +golang.org/x/sys 43e60d72a8e2bd92ee98319ba9a384a0e9837c08 +gopkg.in/yaml.v2 a3f3340b5840cee44f372bddb5880fcbc419b46a +k8s.io/client-go bcde30fb7eaed76fd98a36b4120321b94995ffb6 +github.com/xeipuuv/gojsonschema master +github.com/xeipuuv/gojsonreference master +github.com/xeipuuv/gojsonpointer master +github.com/tchap/go-patricia v2.2.6 +github.com/opencontainers/selinux 077c8b6d1c18456fb7c792bc0de52295a0d1900e +github.com/BurntSushi/toml b26d9c308763d68093482582cea63d69be07a0f0 +github.com/ostreedev/ostree-go 56f3a639dbc0f2f5051c6d52dade28a882ba78ce +github.com/gogo/protobuf fcdc5011193ff531a548e9b0301828d5a5b97fd8 +github.com/pquerna/ffjson master +github.com/syndtr/gocapability master +github.com/Microsoft/go-winio ab35fc04b6365e8fcb18e6e9e41ea4a02b10b175 +github.com/Microsoft/hcsshim eca7177590cdcbd25bbc5df27e3b693a54b53a6a +github.com/ulikunitz/xz v0.5.4 +github.com/etcd-io/bbolt v1.3.2 +github.com/klauspost/pgzip v1.2.1 +github.com/klauspost/compress v1.4.1 +github.com/klauspost/cpuid v1.2.0 +github.com/vbauerster/mpb v3.3.4 +github.com/mattn/go-isatty v0.0.4 +github.com/VividCortex/ewma v1.1.1 diff --git a/vendor/github.com/containers/image/version/version.go 
b/vendor/github.com/containers/image/version/version.go new file mode 100644 index 0000000..62b2c8b --- /dev/null +++ b/vendor/github.com/containers/image/version/version.go @@ -0,0 +1,18 @@ +package version + +import "fmt" + +const ( + // VersionMajor is for API-incompatible changes + VersionMajor = 2 + // VersionMinor is for functionality added in a backwards-compatible manner + VersionMinor = 0 + // VersionPatch is for backwards-compatible bug fixes + VersionPatch = 0 + + // VersionDev indicates a development branch. Releases will be an empty string. + VersionDev = "" +) + +// Version is the specification version that the package types support. +var Version = fmt.Sprintf("%d.%d.%d%s", VersionMajor, VersionMinor, VersionPatch, VersionDev) diff --git a/vendor/github.com/containers/storage/LICENSE b/vendor/github.com/containers/storage/LICENSE new file mode 100644 index 0000000..8f3fee6 --- /dev/null +++ b/vendor/github.com/containers/storage/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner.
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2013-2016 Docker, Inc. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/containers/storage/NOTICE b/vendor/github.com/containers/storage/NOTICE new file mode 100644 index 0000000..8a37c1c --- /dev/null +++ b/vendor/github.com/containers/storage/NOTICE @@ -0,0 +1,19 @@ +Docker +Copyright 2012-2016 Docker, Inc. + +This product includes software developed at Docker, Inc. (https://www.docker.com). + +This product contains software (https://github.com/kr/pty) developed +by Keith Rarick, licensed under the MIT License. + +The following is courtesy of our legal counsel: + + +Use and transfer of Docker may be subject to certain restrictions by the +United States and other governments. +It is your responsibility to ensure that your use and/or transfer does not +violate applicable laws. + +For more information, please see https://www.bis.doc.gov + +See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/vendor/github.com/containers/storage/README.md b/vendor/github.com/containers/storage/README.md new file mode 100644 index 0000000..fef46a6 --- /dev/null +++ b/vendor/github.com/containers/storage/README.md @@ -0,0 +1,46 @@ +`storage` is a Go library which aims to provide methods for storing filesystem +layers, container images, and containers. A `containers-storage` CLI wrapper +is also included for manual and scripting use. + +To build the CLI wrapper, use 'make binary'. + +Operations which use VMs expect to launch them using 'vagrant', defaulting to +using its 'libvirt' provider. The boxes used are also available for the +'virtualbox' provider, and can be selected by setting $VAGRANT_PROVIDER to +'virtualbox' before kicking off the build. + +The library manages three types of items: layers, images, and containers. + +A *layer* is a copy-on-write filesystem which is notionally stored as a set of +changes relative to its *parent* layer, if it has one. A given layer can only +have one parent, but any layer can be the parent of multiple layers. Layers +which are parents of other layers should be treated as read-only. + +An *image* is a reference to a particular layer (its _top_ layer), along with +other information which the library can manage for the convenience of its +caller. This information typically includes configuration templates for +running a binary contained within the image's layers, and may include +cryptographic signatures. Multiple images can reference the same layer, as the +differences between two images may not be in their layer contents. + +A *container* is a read-write layer which is a child of an image's top layer, +along with information which the library can manage for the convenience of its +caller. This information typically includes configuration information for +running the specific container. Multiple containers can be derived from a +single image. 
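+A minimal sketch of that relationship, written against the `ContainerStore`
+interface defined in `containers.go` (the image and layer IDs here are
+placeholders, and it assumes `FileBasedStore` exposes the `Lock`/`Unlock`
+methods implemented at the bottom of that file):
+
+```go
+// Two containers derived from one image: both records carry the same
+// ImageID, but each has its own read-write layer.
+func twoContainers(cs ContainerStore, imageID, rwLayer1, rwLayer2 string) error {
+	cs.Lock() // mutating methods such as Create expect the store lock to be held
+	defer cs.Unlock()
+	web, err := cs.Create("", []string{"web"}, imageID, rwLayer1, "", &ContainerOptions{})
+	if err != nil {
+		return err
+	}
+	db, err := cs.Create("", []string{"db"}, imageID, rwLayer2, "", &ContainerOptions{})
+	if err != nil {
+		return err
+	}
+	// Names resolve to IDs implicitly, so Get accepts either form.
+	c, err := cs.Get("web")
+	if err != nil {
+		return err
+	}
+	_ = c.ID == web.ID && db.ImageID == web.ImageID // both comparisons are true
+	return nil
+}
+```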
+ +Layers, images, and containers are represented primarily by 32 character +hexadecimal IDs, but items of each kind can also have one or more arbitrary +names attached to them, which the library will automatically resolve to IDs +when they are passed in to API calls which expect IDs. + +The library can store what it calls *metadata* for each of these types of +items. This is expected to be a small piece of data, since it is cached in +memory and stored along with the library's own bookkeeping information. + +Additionally, the library can store one or more of what it calls *big data* for +images and containers. This is a named chunk of larger data, which is only in +memory when it is being read from or being written to its own disk file. + +**[Contributing](CONTRIBUTING.md)** +Information about contributing to this project. diff --git a/vendor/github.com/containers/storage/containers.go b/vendor/github.com/containers/storage/containers.go new file mode 100644 index 0000000..e695523 --- /dev/null +++ b/vendor/github.com/containers/storage/containers.go @@ -0,0 +1,604 @@ +package storage + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "time" + + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/ioutils" + "github.com/containers/storage/pkg/stringid" + "github.com/containers/storage/pkg/truncindex" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +// A Container is a reference to a read-write layer with metadata. +type Container struct { + // ID is either one which was specified at create-time, or a random + // value which was generated by the library. + ID string `json:"id"` + + // Names is an optional set of user-defined convenience values. The + // container can be referred to by its ID or any of its names. Names + // are unique among containers. + Names []string `json:"names,omitempty"` + + // ImageID is the ID of the image which was used to create the container. + ImageID string `json:"image"` + + // LayerID is the ID of the read-write layer for the container itself. + // It is assumed that the image's top layer is the parent of the container's + // read-write layer. + LayerID string `json:"layer"` + + // Metadata is data we keep for the convenience of the caller. It is not + // expected to be large, since it is kept in memory. + Metadata string `json:"metadata,omitempty"` + + // BigDataNames is a list of names of data items that we keep for the + // convenience of the caller. They can be large, and are only in + // memory when being read from or written to disk. + BigDataNames []string `json:"big-data-names,omitempty"` + + // BigDataSizes maps the names in BigDataNames to the sizes of the data + // that has been stored, if they're known. + BigDataSizes map[string]int64 `json:"big-data-sizes,omitempty"` + + // BigDataDigests maps the names in BigDataNames to the digests of the + // data that has been stored, if they're known. + BigDataDigests map[string]digest.Digest `json:"big-data-digests,omitempty"` + + // Created is the datestamp for when this container was created. Older + // versions of the library did not track this information, so callers + // will likely want to use the IsZero() method to verify that a value + // is set before using it. + Created time.Time `json:"created,omitempty"` + + // UIDMap and GIDMap are used for setting up a container's root + // filesystem for use inside of a user namespace where UID mapping is + // being used. 
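+ // For example (illustrative values, not from this change), an idtools.IDMap
+ // entry of {ContainerID: 0, HostID: 100000, Size: 65536} maps root inside
+ // the container onto host UID 100000.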
+ UIDMap []idtools.IDMap `json:"uidmap,omitempty"` + GIDMap []idtools.IDMap `json:"gidmap,omitempty"` + + Flags map[string]interface{} `json:"flags,omitempty"` +} + +// ContainerStore provides bookkeeping for information about Containers. +type ContainerStore interface { + FileBasedStore + MetadataStore + ContainerBigDataStore + FlaggableStore + + // Create creates a container that has a specified ID (or generates a + // random one if an empty value is supplied) and optional names, + // based on the specified image, using the specified layer as its + // read-write layer. + // The maps in the container's options structure are recorded for the + // convenience of the caller, nothing more. + Create(id string, names []string, image, layer, metadata string, options *ContainerOptions) (*Container, error) + + // SetNames updates the list of names associated with the container + // with the specified ID. + SetNames(id string, names []string) error + + // Get retrieves information about a container given an ID or name. + Get(id string) (*Container, error) + + // Exists checks if there is a container with the given ID or name. + Exists(id string) bool + + // Delete removes the record of the container. + Delete(id string) error + + // Wipe removes records of all containers. + Wipe() error + + // Lookup attempts to translate a name to an ID. Most methods do this + // implicitly. + Lookup(name string) (string, error) + + // Containers returns a slice enumerating the known containers. + Containers() ([]Container, error) +} + +type containerStore struct { + lockfile Locker + dir string + containers []*Container + idindex *truncindex.TruncIndex + byid map[string]*Container + bylayer map[string]*Container + byname map[string]*Container +} + +func copyContainer(c *Container) *Container { + return &Container{ + ID: c.ID, + Names: copyStringSlice(c.Names), + ImageID: c.ImageID, + LayerID: c.LayerID, + Metadata: c.Metadata, + BigDataNames: copyStringSlice(c.BigDataNames), + BigDataSizes: copyStringInt64Map(c.BigDataSizes), + BigDataDigests: copyStringDigestMap(c.BigDataDigests), + Created: c.Created, + UIDMap: copyIDMap(c.UIDMap), + GIDMap: copyIDMap(c.GIDMap), + Flags: copyStringInterfaceMap(c.Flags), + } +} + +func (c *Container) MountLabel() string { + if label, ok := c.Flags["MountLabel"].(string); ok { + return label + } + return "" +} + +func (c *Container) ProcessLabel() string { + if label, ok := c.Flags["ProcessLabel"].(string); ok { + return label + } + return "" +} + +func (c *Container) MountOpts() []string { + if mountOpts, ok := c.Flags["MountOpts"].([]string); ok { + return mountOpts + } + return nil +} + +func (r *containerStore) Containers() ([]Container, error) { + containers := make([]Container, len(r.containers)) + for i := range r.containers { + containers[i] = *copyContainer(r.containers[i]) + } + return containers, nil +} + +func (r *containerStore) containerspath() string { + return filepath.Join(r.dir, "containers.json") +} + +func (r *containerStore) datadir(id string) string { + return filepath.Join(r.dir, id) +} + +func (r *containerStore) datapath(id, key string) string { + return filepath.Join(r.datadir(id), makeBigDataBaseName(key)) +} + +func (r *containerStore) Load() error { + needSave := false + rpath := r.containerspath() + data, err := ioutil.ReadFile(rpath) + if err != nil && !os.IsNotExist(err) { + return err + } + containers := []*Container{} + layers := make(map[string]*Container) + idlist := []string{} + ids := make(map[string]*Container) + names := 
make(map[string]*Container) + if err = json.Unmarshal(data, &containers); len(data) == 0 || err == nil { + idlist = make([]string, 0, len(containers)) + for n, container := range containers { + idlist = append(idlist, container.ID) + ids[container.ID] = containers[n] + layers[container.LayerID] = containers[n] + for _, name := range container.Names { + if conflict, ok := names[name]; ok { + r.removeName(conflict, name) + needSave = true + } + names[name] = containers[n] + } + } + } + r.containers = containers + r.idindex = truncindex.NewTruncIndex(idlist) + r.byid = ids + r.bylayer = layers + r.byname = names + if needSave { + return r.Save() + } + return nil +} + +func (r *containerStore) Save() error { + if !r.Locked() { + return errors.New("container store is not locked") + } + rpath := r.containerspath() + if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil { + return err + } + jdata, err := json.Marshal(&r.containers) + if err != nil { + return err + } + defer r.Touch() + return ioutils.AtomicWriteFile(rpath, jdata, 0600) +} + +func newContainerStore(dir string) (ContainerStore, error) { + if err := os.MkdirAll(dir, 0700); err != nil { + return nil, err + } + lockfile, err := GetLockfile(filepath.Join(dir, "containers.lock")) + if err != nil { + return nil, err + } + lockfile.Lock() + defer lockfile.Unlock() + cstore := containerStore{ + lockfile: lockfile, + dir: dir, + containers: []*Container{}, + byid: make(map[string]*Container), + bylayer: make(map[string]*Container), + byname: make(map[string]*Container), + } + if err := cstore.Load(); err != nil { + return nil, err + } + return &cstore, nil +} + +func (r *containerStore) lookup(id string) (*Container, bool) { + if container, ok := r.byid[id]; ok { + return container, ok + } else if container, ok := r.byname[id]; ok { + return container, ok + } else if container, ok := r.bylayer[id]; ok { + return container, ok + } else if longid, err := r.idindex.Get(id); err == nil { + if container, ok := r.byid[longid]; ok { + return container, ok + } + } + return nil, false +} + +func (r *containerStore) ClearFlag(id string, flag string) error { + container, ok := r.lookup(id) + if !ok { + return ErrContainerUnknown + } + delete(container.Flags, flag) + return r.Save() +} + +func (r *containerStore) SetFlag(id string, flag string, value interface{}) error { + container, ok := r.lookup(id) + if !ok { + return ErrContainerUnknown + } + if container.Flags == nil { + container.Flags = make(map[string]interface{}) + } + container.Flags[flag] = value + return r.Save() +} + +func (r *containerStore) Create(id string, names []string, image, layer, metadata string, options *ContainerOptions) (container *Container, err error) { + if id == "" { + id = stringid.GenerateRandomID() + _, idInUse := r.byid[id] + for idInUse { + id = stringid.GenerateRandomID() + _, idInUse = r.byid[id] + } + } + if _, idInUse := r.byid[id]; idInUse { + return nil, ErrDuplicateID + } + if options.MountOpts != nil { + options.Flags["MountOpts"] = append([]string{}, options.MountOpts...) + } + names = dedupeNames(names) + for _, name := range names { + if _, nameInUse := r.byname[name]; nameInUse { + return nil, errors.Wrapf(ErrDuplicateName, + fmt.Sprintf("the container name \"%s\" is already in use by \"%s\". 
You have to remove that container to be able to reuse that name.", name, r.byname[name].ID)) + } + } + if err == nil { + container = &Container{ + ID: id, + Names: names, + ImageID: image, + LayerID: layer, + Metadata: metadata, + BigDataNames: []string{}, + BigDataSizes: make(map[string]int64), + BigDataDigests: make(map[string]digest.Digest), + Created: time.Now().UTC(), + Flags: copyStringInterfaceMap(options.Flags), + UIDMap: copyIDMap(options.UIDMap), + GIDMap: copyIDMap(options.GIDMap), + } + r.containers = append(r.containers, container) + r.byid[id] = container + r.idindex.Add(id) + r.bylayer[layer] = container + for _, name := range names { + r.byname[name] = container + } + err = r.Save() + container = copyContainer(container) + } + return container, err +} + +func (r *containerStore) Metadata(id string) (string, error) { + if container, ok := r.lookup(id); ok { + return container.Metadata, nil + } + return "", ErrContainerUnknown +} + +func (r *containerStore) SetMetadata(id, metadata string) error { + if container, ok := r.lookup(id); ok { + container.Metadata = metadata + return r.Save() + } + return ErrContainerUnknown +} + +func (r *containerStore) removeName(container *Container, name string) { + container.Names = stringSliceWithoutValue(container.Names, name) +} + +func (r *containerStore) SetNames(id string, names []string) error { + names = dedupeNames(names) + if container, ok := r.lookup(id); ok { + for _, name := range container.Names { + delete(r.byname, name) + } + for _, name := range names { + if otherContainer, ok := r.byname[name]; ok { + r.removeName(otherContainer, name) + } + r.byname[name] = container + } + container.Names = names + return r.Save() + } + return ErrContainerUnknown +} + +func (r *containerStore) Delete(id string) error { + container, ok := r.lookup(id) + if !ok { + return ErrContainerUnknown + } + id = container.ID + toDeleteIndex := -1 + for i, candidate := range r.containers { + if candidate.ID == id { + toDeleteIndex = i + break + } + } + delete(r.byid, id) + r.idindex.Delete(id) + delete(r.bylayer, container.LayerID) + for _, name := range container.Names { + delete(r.byname, name) + } + if toDeleteIndex != -1 { + // delete the container at toDeleteIndex + if toDeleteIndex == len(r.containers)-1 { + r.containers = r.containers[:len(r.containers)-1] + } else { + r.containers = append(r.containers[:toDeleteIndex], r.containers[toDeleteIndex+1:]...) 
+ } + } + if err := r.Save(); err != nil { + return err + } + if err := os.RemoveAll(r.datadir(id)); err != nil { + return err + } + return nil +} + +func (r *containerStore) Get(id string) (*Container, error) { + if container, ok := r.lookup(id); ok { + return copyContainer(container), nil + } + return nil, ErrContainerUnknown +} + +func (r *containerStore) Lookup(name string) (id string, err error) { + if container, ok := r.lookup(name); ok { + return container.ID, nil + } + return "", ErrContainerUnknown +} + +func (r *containerStore) Exists(id string) bool { + _, ok := r.lookup(id) + return ok +} + +func (r *containerStore) BigData(id, key string) ([]byte, error) { + if key == "" { + return nil, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve container big data value for empty name") + } + c, ok := r.lookup(id) + if !ok { + return nil, ErrContainerUnknown + } + return ioutil.ReadFile(r.datapath(c.ID, key)) +} + +func (r *containerStore) BigDataSize(id, key string) (int64, error) { + if key == "" { + return -1, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve size of container big data with empty name") + } + c, ok := r.lookup(id) + if !ok { + return -1, ErrContainerUnknown + } + if c.BigDataSizes == nil { + c.BigDataSizes = make(map[string]int64) + } + if size, ok := c.BigDataSizes[key]; ok { + return size, nil + } + if data, err := r.BigData(id, key); err == nil && data != nil { + if err = r.SetBigData(id, key, data); err == nil { + c, ok := r.lookup(id) + if !ok { + return -1, ErrContainerUnknown + } + if size, ok := c.BigDataSizes[key]; ok { + return size, nil + } + } else { + return -1, err + } + } + return -1, ErrSizeUnknown +} + +func (r *containerStore) BigDataDigest(id, key string) (digest.Digest, error) { + if key == "" { + return "", errors.Wrapf(ErrInvalidBigDataName, "can't retrieve digest of container big data value with empty name") + } + c, ok := r.lookup(id) + if !ok { + return "", ErrContainerUnknown + } + if c.BigDataDigests == nil { + c.BigDataDigests = make(map[string]digest.Digest) + } + if d, ok := c.BigDataDigests[key]; ok { + return d, nil + } + if data, err := r.BigData(id, key); err == nil && data != nil { + if err = r.SetBigData(id, key, data); err == nil { + c, ok := r.lookup(id) + if !ok { + return "", ErrContainerUnknown + } + if d, ok := c.BigDataDigests[key]; ok { + return d, nil + } + } else { + return "", err + } + } + return "", ErrDigestUnknown +} + +func (r *containerStore) BigDataNames(id string) ([]string, error) { + c, ok := r.lookup(id) + if !ok { + return nil, ErrContainerUnknown + } + return copyStringSlice(c.BigDataNames), nil +} + +func (r *containerStore) SetBigData(id, key string, data []byte) error { + if key == "" { + return errors.Wrapf(ErrInvalidBigDataName, "can't set empty name for container big data item") + } + c, ok := r.lookup(id) + if !ok { + return ErrContainerUnknown + } + if err := os.MkdirAll(r.datadir(c.ID), 0700); err != nil { + return err + } + err := ioutils.AtomicWriteFile(r.datapath(c.ID, key), data, 0600) + if err == nil { + save := false + if c.BigDataSizes == nil { + c.BigDataSizes = make(map[string]int64) + } + oldSize, sizeOk := c.BigDataSizes[key] + c.BigDataSizes[key] = int64(len(data)) + if c.BigDataDigests == nil { + c.BigDataDigests = make(map[string]digest.Digest) + } + oldDigest, digestOk := c.BigDataDigests[key] + newDigest := digest.Canonical.FromBytes(data) + c.BigDataDigests[key] = newDigest + if !sizeOk || oldSize != c.BigDataSizes[key] || !digestOk || oldDigest != newDigest { + save = true + } 
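+ // Record the key in BigDataNames the first time this item is written.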
+ addName := true + for _, name := range c.BigDataNames { + if name == key { + addName = false + break + } + } + if addName { + c.BigDataNames = append(c.BigDataNames, key) + save = true + } + if save { + err = r.Save() + } + } + return err +} + +func (r *containerStore) Wipe() error { + ids := make([]string, 0, len(r.byid)) + for id := range r.byid { + ids = append(ids, id) + } + for _, id := range ids { + if err := r.Delete(id); err != nil { + return err + } + } + return nil +} + +func (r *containerStore) Lock() { + r.lockfile.Lock() +} + +func (r *containerStore) RecursiveLock() { + r.lockfile.RecursiveLock() +} + +func (r *containerStore) RLock() { + r.lockfile.RLock() +} +func (r *containerStore) Unlock() { + r.lockfile.Unlock() +} + +func (r *containerStore) Touch() error { + return r.lockfile.Touch() +} + +func (r *containerStore) Modified() (bool, error) { + return r.lockfile.Modified() +} + +func (r *containerStore) IsReadWrite() bool { + return r.lockfile.IsReadWrite() +} + +func (r *containerStore) TouchedSince(when time.Time) bool { + return r.lockfile.TouchedSince(when) +} + +func (r *containerStore) Locked() bool { + return r.lockfile.Locked() +} diff --git a/vendor/github.com/containers/storage/containers_ffjson.go b/vendor/github.com/containers/storage/containers_ffjson.go new file mode 100644 index 0000000..aef6bec --- /dev/null +++ b/vendor/github.com/containers/storage/containers_ffjson.go @@ -0,0 +1,1413 @@ +// Code generated by ffjson . DO NOT EDIT. +// source: containers.go + +package storage + +import ( + "bytes" + "encoding/json" + "fmt" + "github.com/containers/storage/pkg/idtools" + "github.com/opencontainers/go-digest" + fflib "github.com/pquerna/ffjson/fflib/v1" +) + +// MarshalJSON marshal bytes to json - template +func (j *Container) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if j == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := j.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// MarshalJSONBuf marshal buff to json - template +func (j *Container) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if j == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ "id":`) + fflib.WriteJsonString(buf, string(j.ID)) + buf.WriteByte(',') + if len(j.Names) != 0 { + buf.WriteString(`"names":`) + if j.Names != nil { + buf.WriteString(`[`) + for i, v := range j.Names { + if i != 0 { + buf.WriteString(`,`) + } + fflib.WriteJsonString(buf, string(v)) + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte(',') + } + buf.WriteString(`"image":`) + fflib.WriteJsonString(buf, string(j.ImageID)) + buf.WriteString(`,"layer":`) + fflib.WriteJsonString(buf, string(j.LayerID)) + buf.WriteByte(',') + if len(j.Metadata) != 0 { + buf.WriteString(`"metadata":`) + fflib.WriteJsonString(buf, string(j.Metadata)) + buf.WriteByte(',') + } + if len(j.BigDataNames) != 0 { + buf.WriteString(`"big-data-names":`) + if j.BigDataNames != nil { + buf.WriteString(`[`) + for i, v := range j.BigDataNames { + if i != 0 { + buf.WriteString(`,`) + } + fflib.WriteJsonString(buf, string(v)) + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte(',') + } + if len(j.BigDataSizes) != 0 { + if j.BigDataSizes == nil { + buf.WriteString(`"big-data-sizes":null`) + } else { + buf.WriteString(`"big-data-sizes":{ `) + for key, value := range j.BigDataSizes { + fflib.WriteJsonString(buf, key) + 
buf.WriteString(`:`) + fflib.FormatBits2(buf, uint64(value), 10, value < 0) + buf.WriteByte(',') + } + buf.Rewind(1) + buf.WriteByte('}') + } + buf.WriteByte(',') + } + if len(j.BigDataDigests) != 0 { + if j.BigDataDigests == nil { + buf.WriteString(`"big-data-digests":null`) + } else { + buf.WriteString(`"big-data-digests":{ `) + for key, value := range j.BigDataDigests { + fflib.WriteJsonString(buf, key) + buf.WriteString(`:`) + fflib.WriteJsonString(buf, string(value)) + buf.WriteByte(',') + } + buf.Rewind(1) + buf.WriteByte('}') + } + buf.WriteByte(',') + } + if true { + buf.WriteString(`"created":`) + + { + + obj, err = j.Created.MarshalJSON() + if err != nil { + return err + } + buf.Write(obj) + + } + buf.WriteByte(',') + } + if len(j.UIDMap) != 0 { + buf.WriteString(`"uidmap":`) + if j.UIDMap != nil { + buf.WriteString(`[`) + for i, v := range j.UIDMap { + if i != 0 { + buf.WriteString(`,`) + } + /* Struct fall back. type=idtools.IDMap kind=struct */ + err = buf.Encode(&v) + if err != nil { + return err + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte(',') + } + if len(j.GIDMap) != 0 { + buf.WriteString(`"gidmap":`) + if j.GIDMap != nil { + buf.WriteString(`[`) + for i, v := range j.GIDMap { + if i != 0 { + buf.WriteString(`,`) + } + /* Struct fall back. type=idtools.IDMap kind=struct */ + err = buf.Encode(&v) + if err != nil { + return err + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte(',') + } + if len(j.Flags) != 0 { + buf.WriteString(`"flags":`) + /* Falling back. type=map[string]interface {} kind=map */ + err = buf.Encode(j.Flags) + if err != nil { + return err + } + buf.WriteByte(',') + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffjtContainerbase = iota + ffjtContainernosuchkey + + ffjtContainerID + + ffjtContainerNames + + ffjtContainerImageID + + ffjtContainerLayerID + + ffjtContainerMetadata + + ffjtContainerBigDataNames + + ffjtContainerBigDataSizes + + ffjtContainerBigDataDigests + + ffjtContainerCreated + + ffjtContainerUIDMap + + ffjtContainerGIDMap + + ffjtContainerFlags +) + +var ffjKeyContainerID = []byte("id") + +var ffjKeyContainerNames = []byte("names") + +var ffjKeyContainerImageID = []byte("image") + +var ffjKeyContainerLayerID = []byte("layer") + +var ffjKeyContainerMetadata = []byte("metadata") + +var ffjKeyContainerBigDataNames = []byte("big-data-names") + +var ffjKeyContainerBigDataSizes = []byte("big-data-sizes") + +var ffjKeyContainerBigDataDigests = []byte("big-data-digests") + +var ffjKeyContainerCreated = []byte("created") + +var ffjKeyContainerUIDMap = []byte("uidmap") + +var ffjKeyContainerGIDMap = []byte("gidmap") + +var ffjKeyContainerFlags = []byte("flags") + +// UnmarshalJSON umarshall json - template of ffjson +func (j *Container) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +// UnmarshalJSONFFLexer fast json unmarshall - template ffjson +func (j *Container) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error + currentKey := ffjtContainerbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto 
wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffjtContainernosuchkey + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'b': + + if bytes.Equal(ffjKeyContainerBigDataNames, kn) { + currentKey = ffjtContainerBigDataNames + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffjKeyContainerBigDataSizes, kn) { + currentKey = ffjtContainerBigDataSizes + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffjKeyContainerBigDataDigests, kn) { + currentKey = ffjtContainerBigDataDigests + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'c': + + if bytes.Equal(ffjKeyContainerCreated, kn) { + currentKey = ffjtContainerCreated + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'f': + + if bytes.Equal(ffjKeyContainerFlags, kn) { + currentKey = ffjtContainerFlags + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'g': + + if bytes.Equal(ffjKeyContainerGIDMap, kn) { + currentKey = ffjtContainerGIDMap + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'i': + + if bytes.Equal(ffjKeyContainerID, kn) { + currentKey = ffjtContainerID + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffjKeyContainerImageID, kn) { + currentKey = ffjtContainerImageID + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'l': + + if bytes.Equal(ffjKeyContainerLayerID, kn) { + currentKey = ffjtContainerLayerID + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'm': + + if bytes.Equal(ffjKeyContainerMetadata, kn) { + currentKey = ffjtContainerMetadata + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'n': + + if bytes.Equal(ffjKeyContainerNames, kn) { + currentKey = ffjtContainerNames + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'u': + + if bytes.Equal(ffjKeyContainerUIDMap, kn) { + currentKey = ffjtContainerUIDMap + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffjKeyContainerFlags, kn) { + currentKey = ffjtContainerFlags + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffjKeyContainerGIDMap, kn) { + currentKey = ffjtContainerGIDMap + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffjKeyContainerUIDMap, kn) { + currentKey = ffjtContainerUIDMap + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffjKeyContainerCreated, kn) { + currentKey = ffjtContainerCreated + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffjKeyContainerBigDataDigests, kn) { + currentKey = ffjtContainerBigDataDigests + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffjKeyContainerBigDataSizes, kn) { + currentKey = ffjtContainerBigDataSizes + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffjKeyContainerBigDataNames, kn) { + 
currentKey = ffjtContainerBigDataNames + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffjKeyContainerMetadata, kn) { + currentKey = ffjtContainerMetadata + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffjKeyContainerLayerID, kn) { + currentKey = ffjtContainerLayerID + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffjKeyContainerImageID, kn) { + currentKey = ffjtContainerImageID + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffjKeyContainerNames, kn) { + currentKey = ffjtContainerNames + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffjKeyContainerID, kn) { + currentKey = ffjtContainerID + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffjtContainernosuchkey + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffjtContainerID: + goto handle_ID + + case ffjtContainerNames: + goto handle_Names + + case ffjtContainerImageID: + goto handle_ImageID + + case ffjtContainerLayerID: + goto handle_LayerID + + case ffjtContainerMetadata: + goto handle_Metadata + + case ffjtContainerBigDataNames: + goto handle_BigDataNames + + case ffjtContainerBigDataSizes: + goto handle_BigDataSizes + + case ffjtContainerBigDataDigests: + goto handle_BigDataDigests + + case ffjtContainerCreated: + goto handle_Created + + case ffjtContainerUIDMap: + goto handle_UIDMap + + case ffjtContainerGIDMap: + goto handle_GIDMap + + case ffjtContainerFlags: + goto handle_Flags + + case ffjtContainernosuchkey: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_ID: + + /* handler: j.ID type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + j.ID = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Names: + + /* handler: j.Names type=[]string kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + j.Names = nil + } else { + + j.Names = []string{} + + wantVal := true + + for { + + var tmpJNames string + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmpJNames type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + tmpJNames = string(string(outBuf)) + + } + } + + j.Names = append(j.Names, tmpJNames) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_ImageID: + + /* handler: j.ImageID type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + j.ImageID = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_LayerID: + + /* handler: j.LayerID type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + j.LayerID = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Metadata: + + /* handler: j.Metadata type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + j.Metadata = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_BigDataNames: + + /* handler: j.BigDataNames type=[]string kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + j.BigDataNames = nil + } else { + + j.BigDataNames = []string{} + + wantVal := true + + for { + + var tmpJBigDataNames string + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmpJBigDataNames type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + tmpJBigDataNames = string(string(outBuf)) + + } + } + + j.BigDataNames = append(j.BigDataNames, tmpJBigDataNames) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_BigDataSizes: + + /* handler: j.BigDataSizes type=map[string]int64 kind=map quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_bracket && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + j.BigDataSizes = nil + } else { + + j.BigDataSizes = make(map[string]int64, 0) + + wantVal := true + + for { + + var k string + + var tmpJBigDataSizes int64 + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_bracket { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: k type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + k = string(string(outBuf)) + + } + } + + // Expect ':' after key + tok = fs.Scan() + if tok != fflib.FFTok_colon { + return fs.WrapErr(fmt.Errorf("wanted colon token, but got token: %v", tok)) + } + + tok = fs.Scan() + /* handler: tmpJBigDataSizes type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + tmpJBigDataSizes = int64(tval) + + } + } + + j.BigDataSizes[k] = tmpJBigDataSizes + + wantVal = false + } + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_BigDataDigests: + + /* handler: j.BigDataDigests type=map[string]digest.Digest kind=map quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_bracket && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + j.BigDataDigests = nil + } else { + + j.BigDataDigests = make(map[string]digest.Digest, 0) + + wantVal := true + + for { + + var k string + + var tmpJBigDataDigests digest.Digest + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_bracket { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: k type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + k = string(string(outBuf)) + + } + } + + // Expect ':' after key + tok = fs.Scan() + if tok != fflib.FFTok_colon { + return fs.WrapErr(fmt.Errorf("wanted colon token, but got token: %v", tok)) + } + + tok = fs.Scan() + /* handler: tmpJBigDataDigests type=digest.Digest kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for Digest", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + tmpJBigDataDigests = digest.Digest(string(outBuf)) + + } + } + + j.BigDataDigests[k] = tmpJBigDataDigests + + wantVal = false + } + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Created: + + /* handler: j.Created type=time.Time kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + } else { + + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = j.Created.UnmarshalJSON(tbuf) + if err != nil { + return fs.WrapErr(err) + } + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_UIDMap: + + /* handler: j.UIDMap type=[]idtools.IDMap kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + j.UIDMap = nil + } else { + + j.UIDMap = []idtools.IDMap{} + + wantVal := true + + for { + + var tmpJUIDMap idtools.IDMap + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmpJUIDMap type=idtools.IDMap kind=struct quoted=false*/ + + { + /* Falling back. type=idtools.IDMap kind=struct */ + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = json.Unmarshal(tbuf, &tmpJUIDMap) + if err != nil { + return fs.WrapErr(err) + } + } + + j.UIDMap = append(j.UIDMap, tmpJUIDMap) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_GIDMap: + + /* handler: j.GIDMap type=[]idtools.IDMap kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + j.GIDMap = nil + } else { + + j.GIDMap = []idtools.IDMap{} + + wantVal := true + + for { + + var tmpJGIDMap idtools.IDMap + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmpJGIDMap type=idtools.IDMap kind=struct quoted=false*/ + + { + /* Falling back. type=idtools.IDMap kind=struct */ + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = json.Unmarshal(tbuf, &tmpJGIDMap) + if err != nil { + return fs.WrapErr(err) + } + } + + j.GIDMap = append(j.GIDMap, tmpJGIDMap) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Flags: + + /* handler: j.Flags type=map[string]interface {} kind=map quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_bracket && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + j.Flags = nil + } else { + + j.Flags = make(map[string]interface{}, 0) + + wantVal := true + + for { + + var k string + + var tmpJFlags interface{} + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_bracket { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: k type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + k = string(string(outBuf)) + + } + } + + // Expect ':' after key + tok = fs.Scan() + if tok != fflib.FFTok_colon { + return fs.WrapErr(fmt.Errorf("wanted colon token, but got token: %v", tok)) + } + + tok = fs.Scan() + /* handler: tmpJFlags type=interface {} kind=interface quoted=false*/ + + { + /* Falling back. 
type=interface {} kind=interface */ + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = json.Unmarshal(tbuf, &tmpJFlags) + if err != nil { + return fs.WrapErr(err) + } + } + + j.Flags[k] = tmpJFlags + + wantVal = false + } + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +// MarshalJSON marshal bytes to json - template +func (j *containerStore) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if j == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := j.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// MarshalJSONBuf marshal buff to json - template +func (j *containerStore) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if j == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{}`) + return nil +} + +const ( + ffjtcontainerStorebase = iota + ffjtcontainerStorenosuchkey +) + +// UnmarshalJSON umarshall json - template of ffjson +func (j *containerStore) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +// UnmarshalJSONFFLexer fast json unmarshall - template ffjson +func (j *containerStore) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error + currentKey := ffjtcontainerStorebase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffjtcontainerStorenosuchkey + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + } + + currentKey = ffjtcontainerStorenosuchkey + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffjtcontainerStorenosuchkey: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} diff --git a/vendor/github.com/containers/storage/drivers/aufs/aufs.go b/vendor/github.com/containers/storage/drivers/aufs/aufs.go new file mode 100644 index 0000000..353d170 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/aufs/aufs.go @@ -0,0 +1,747 @@ +// +build linux + +/* + +aufs driver directory structure + + . + ├── layers // Metadata of layers + │ ├── 1 + │ ├── 2 + │ └── 3 + ├── diff // Content of the layer + │ ├── 1 // Contains layers that need to be mounted for the id + │ ├── 2 + │ └── 3 + └── mnt // Mount points for the rw layers to be mounted + ├── 1 + ├── 2 + └── 3 + +*/ + +package aufs + +import ( + "bufio" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path" + "path/filepath" + "strings" + "sync" + "time" + + "github.com/containers/storage/drivers" + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/chrootarchive" + "github.com/containers/storage/pkg/directory" + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/locker" + mountpk "github.com/containers/storage/pkg/mount" + "github.com/containers/storage/pkg/parsers" + "github.com/containers/storage/pkg/system" + rsystem "github.com/opencontainers/runc/libcontainer/system" + "github.com/opencontainers/selinux/go-selinux/label" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/vbatts/tar-split/tar/storage" + "golang.org/x/sys/unix" +) + +var ( + // ErrAufsNotSupported is returned if aufs is not supported by the host. + ErrAufsNotSupported = fmt.Errorf("AUFS was not found in /proc/filesystems") + // ErrAufsNested means aufs cannot be used bc we are in a user namespace + ErrAufsNested = fmt.Errorf("AUFS cannot be used in non-init user namespace") + backingFs = "" + + enableDirpermLock sync.Once + enableDirperm bool +) + +func init() { + graphdriver.Register("aufs", Init) +} + +// Driver contains information about the filesystem mounted. 
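+// pathCache maps layer ids to their mount points and is guarded by its own
+// pathCacheLock; per-id operations are additionally serialized via locker.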
+type Driver struct { + sync.Mutex + root string + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap + ctr *graphdriver.RefCounter + pathCacheLock sync.Mutex + pathCache map[string]string + naiveDiff graphdriver.DiffDriver + locker *locker.Locker + mountOptions string +} + +// Init returns a new AUFS driver. +// An error is returned if AUFS is not supported. +func Init(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { + + // Try to load the aufs kernel module + if err := supportsAufs(); err != nil { + return nil, errors.Wrap(graphdriver.ErrNotSupported, "kernel does not support aufs") + + } + + fsMagic, err := graphdriver.GetFSMagic(root) + if err != nil { + return nil, err + } + if fsName, ok := graphdriver.FsNames[fsMagic]; ok { + backingFs = fsName + } + + switch fsMagic { + case graphdriver.FsMagicAufs, graphdriver.FsMagicBtrfs, graphdriver.FsMagicEcryptfs: + logrus.Errorf("AUFS is not supported over %s", backingFs) + return nil, errors.Wrapf(graphdriver.ErrIncompatibleFS, "AUFS is not supported over %q", backingFs) + } + + var mountOptions string + for _, option := range options { + key, val, err := parsers.ParseKeyValueOpt(option) + if err != nil { + return nil, err + } + key = strings.ToLower(key) + switch key { + case "aufs.mountopt": + mountOptions = val + default: + return nil, fmt.Errorf("option %s not supported", option) + } + } + paths := []string{ + "mnt", + "diff", + "layers", + } + + a := &Driver{ + root: root, + uidMaps: uidMaps, + gidMaps: gidMaps, + pathCache: make(map[string]string), + ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicAufs)), + locker: locker.New(), + mountOptions: mountOptions, + } + + rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) + if err != nil { + return nil, err + } + // Create the root aufs driver dir and return + // if it already exists + // If not populate the dir structure + if err := idtools.MkdirAllAs(root, 0700, rootUID, rootGID); err != nil { + if os.IsExist(err) { + return a, nil + } + return nil, err + } + + if err := mountpk.MakePrivate(root); err != nil { + return nil, err + } + + // Populate the dir structure + for _, p := range paths { + if err := idtools.MkdirAllAs(path.Join(root, p), 0700, rootUID, rootGID); err != nil { + return nil, err + } + } + logger := logrus.WithFields(logrus.Fields{ + "module": "graphdriver", + "driver": "aufs", + }) + + for _, path := range []string{"mnt", "diff"} { + p := filepath.Join(root, path) + entries, err := ioutil.ReadDir(p) + if err != nil { + logger.WithError(err).WithField("dir", p).Error("error reading dir entries") + continue + } + for _, entry := range entries { + if !entry.IsDir() { + continue + } + if strings.HasSuffix(entry.Name(), "-removing") { + logger.WithField("dir", entry.Name()).Debug("Cleaning up stale layer dir") + if err := system.EnsureRemoveAll(filepath.Join(p, entry.Name())); err != nil { + logger.WithField("dir", entry.Name()).WithError(err).Error("Error removing stale layer dir") + } + } + } + } + + a.naiveDiff = graphdriver.NewNaiveDiffDriver(a, a) + return a, nil +} + +// Return a nil error if the kernel supports aufs +// We cannot modprobe because inside dind modprobe fails +// to run +func supportsAufs() error { + // We can try to modprobe aufs first before looking at + // proc/filesystems for when aufs is supported + exec.Command("modprobe", "aufs").Run() + + if rsystem.RunningInUserNS() { + return ErrAufsNested + } + + f, err := os.Open("/proc/filesystems") + if err != nil { + 
return err
+	}
+	defer f.Close()
+
+	s := bufio.NewScanner(f)
+	for s.Scan() {
+		if strings.Contains(s.Text(), "aufs") {
+			return nil
+		}
+	}
+	return ErrAufsNotSupported
+}
+
+func (a *Driver) rootPath() string {
+	return a.root
+}
+
+func (*Driver) String() string {
+	return "aufs"
+}
+
+// Status returns current information about the filesystem such as root directory, number of directories mounted, etc.
+func (a *Driver) Status() [][2]string {
+	ids, _ := loadIds(path.Join(a.rootPath(), "layers"))
+	return [][2]string{
+		{"Root Dir", a.rootPath()},
+		{"Backing Filesystem", backingFs},
+		{"Dirs", fmt.Sprintf("%d", len(ids))},
+		{"Dirperm1 Supported", fmt.Sprintf("%v", useDirperm())},
+	}
+}
+
+// Metadata not implemented
+func (a *Driver) Metadata(id string) (map[string]string, error) {
+	return nil, nil
+}
+
+// Exists returns true if the given id is registered with
+// this driver
+func (a *Driver) Exists(id string) bool {
+	if _, err := os.Lstat(path.Join(a.rootPath(), "layers", id)); err != nil {
+		return false
+	}
+	return true
+}
+
+// AdditionalImageStores returns additional image stores supported by the driver
+func (a *Driver) AdditionalImageStores() []string {
+	return nil
+}
+
+// CreateFromTemplate creates a layer with the same contents and parent as another layer.
+func (a *Driver) CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *graphdriver.CreateOpts, readWrite bool) error {
+	if opts == nil {
+		opts = &graphdriver.CreateOpts{}
+	}
+	return graphdriver.NaiveCreateFromTemplate(a, id, template, templateIDMappings, parent, parentIDMappings, opts, readWrite)
+}
+
+// CreateReadWrite creates a layer that is writable for use as a container
+// file system.
+func (a *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error {
+	return a.Create(id, parent, opts)
+}
+
+// Create three folders for each id
+// mnt, layers, and diff
+func (a *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error {
+
+	if opts != nil && len(opts.StorageOpt) != 0 {
+		return fmt.Errorf("--storage-opt is not supported for aufs")
+	}
+
+	if err := a.createDirsFor(id, parent); err != nil {
+		return err
+	}
+	// Write the layers metadata
+	f, err := os.Create(path.Join(a.rootPath(), "layers", id))
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	if parent != "" {
+		ids, err := getParentIDs(a.rootPath(), parent)
+		if err != nil {
+			return err
+		}
+
+		if _, err := fmt.Fprintln(f, parent); err != nil {
+			return err
+		}
+		for _, i := range ids {
+			if _, err := fmt.Fprintln(f, i); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+// createDirsFor creates two directories for the given id.
+// mnt and diff
+func (a *Driver) createDirsFor(id, parent string) error {
+	paths := []string{
+		"mnt",
+		"diff",
+	}
+
+	// Directory permission is 0755.
+	// The path of directories are <aufs_root_path>/mnt/<id>
+	// and <aufs_root_path>/diff/<id>
+	for _, p := range paths {
+		rootPair := idtools.NewIDMappingsFromMaps(a.uidMaps, a.gidMaps).RootPair()
+		if parent != "" {
+			st, err := system.Stat(path.Join(a.rootPath(), p, parent))
+			if err != nil {
+				return err
+			}
+			rootPair.UID = int(st.UID())
+			rootPair.GID = int(st.GID())
+		}
+		if err := idtools.MkdirAllAndChownNew(path.Join(a.rootPath(), p, id), os.FileMode(0755), rootPair); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Remove will unmount and remove the given id.
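+// Unmount attempts are retried on transient EBUSY errors before the layer's
+// directories are renamed aside and removed.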
+func (a *Driver) Remove(id string) error { + a.locker.Lock(id) + defer a.locker.Unlock(id) + a.pathCacheLock.Lock() + mountpoint, exists := a.pathCache[id] + a.pathCacheLock.Unlock() + if !exists { + mountpoint = a.getMountpoint(id) + } + + logger := logrus.WithFields(logrus.Fields{ + "module": "graphdriver", + "driver": "aufs", + "layer": id, + }) + + var retries int + for { + mounted, err := a.mounted(mountpoint) + if err != nil { + if os.IsNotExist(err) { + break + } + return err + } + if !mounted { + break + } + + err = a.unmount(mountpoint) + if err == nil { + break + } + + if err != unix.EBUSY { + return errors.Wrapf(err, "aufs: unmount error: %s", mountpoint) + } + if retries >= 5 { + return errors.Wrapf(err, "aufs: unmount error after retries: %s", mountpoint) + } + // If unmount returns EBUSY, it could be a transient error. Sleep and retry. + retries++ + logger.Warnf("unmount failed due to EBUSY: retry count: %d", retries) + time.Sleep(100 * time.Millisecond) + } + + // Remove the layers file for the id + if err := os.Remove(path.Join(a.rootPath(), "layers", id)); err != nil && !os.IsNotExist(err) { + return errors.Wrapf(err, "error removing layers dir for %s", id) + } + + if err := atomicRemove(a.getDiffPath(id)); err != nil { + return errors.Wrapf(err, "could not remove diff path for id %s", id) + } + + // Atomically remove each directory in turn by first moving it out of the + // way (so that container runtime doesn't find it anymore) before doing removal of + // the whole tree. + if err := atomicRemove(mountpoint); err != nil { + if errors.Cause(err) == unix.EBUSY { + logger.WithField("dir", mountpoint).WithError(err).Warn("error performing atomic remove due to EBUSY") + } + return errors.Wrapf(err, "could not remove mountpoint for id %s", id) + } + + a.pathCacheLock.Lock() + delete(a.pathCache, id) + a.pathCacheLock.Unlock() + return nil +} + +func atomicRemove(source string) error { + target := source + "-removing" + + err := os.Rename(source, target) + switch { + case err == nil, os.IsNotExist(err): + case os.IsExist(err): + // Got error saying the target dir already exists, maybe the source doesn't exist due to a previous (failed) remove + if _, e := os.Stat(source); !os.IsNotExist(e) { + return errors.Wrapf(err, "target rename dir '%s' exists but should not, this needs to be manually cleaned up", target) + } + default: + return errors.Wrapf(err, "error preparing atomic delete") + } + + return system.EnsureRemoveAll(target) +} + +// Get returns the rootfs path for the id. +// This will mount the dir at its given path +func (a *Driver) Get(id string, options graphdriver.MountOpts) (string, error) { + a.locker.Lock(id) + defer a.locker.Unlock(id) + parents, err := a.getParentLayerPaths(id) + if err != nil && !os.IsNotExist(err) { + return "", err + } + + a.pathCacheLock.Lock() + m, exists := a.pathCache[id] + a.pathCacheLock.Unlock() + + if !exists { + m = a.getDiffPath(id) + if len(parents) > 0 { + m = a.getMountpoint(id) + } + } + if count := a.ctr.Increment(m); count > 1 { + return m, nil + } + + // If a dir does not have a parent ( no layers )do not try to mount + // just return the diff path to the data + if len(parents) > 0 { + if err := a.mount(id, m, parents, options); err != nil { + return "", err + } + } + + a.pathCacheLock.Lock() + a.pathCache[id] = m + a.pathCacheLock.Unlock() + return m, nil +} + +// Put unmounts and updates list of active mounts. 
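+// The mount is reference-counted, so only the final Put for an id unmounts it.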
+func (a *Driver) Put(id string) error { + a.locker.Lock(id) + defer a.locker.Unlock(id) + a.pathCacheLock.Lock() + m, exists := a.pathCache[id] + if !exists { + m = a.getMountpoint(id) + a.pathCache[id] = m + } + a.pathCacheLock.Unlock() + if count := a.ctr.Decrement(m); count > 0 { + return nil + } + + err := a.unmount(m) + if err != nil { + logrus.Debugf("Failed to unmount %s aufs: %v", id, err) + } + return err +} + +// isParent returns if the passed in parent is the direct parent of the passed in layer +func (a *Driver) isParent(id, parent string) bool { + parents, _ := getParentIDs(a.rootPath(), id) + if parent == "" && len(parents) > 0 { + return false + } + return !(len(parents) > 0 && parent != parents[0]) +} + +// Diff produces an archive of the changes between the specified +// layer and its parent layer which may be "". +func (a *Driver) Diff(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (io.ReadCloser, error) { + if !a.isParent(id, parent) { + return a.naiveDiff.Diff(id, idMappings, parent, parentMappings, mountLabel) + } + + if idMappings == nil { + idMappings = &idtools.IDMappings{} + } + + // AUFS doesn't need the parent layer to produce a diff. + return archive.TarWithOptions(path.Join(a.rootPath(), "diff", id), &archive.TarOptions{ + Compression: archive.Uncompressed, + ExcludePatterns: []string{archive.WhiteoutMetaPrefix + "*", "!" + archive.WhiteoutOpaqueDir}, + UIDMaps: idMappings.UIDs(), + GIDMaps: idMappings.GIDs(), + }) +} + +type fileGetNilCloser struct { + storage.FileGetter +} + +func (f fileGetNilCloser) Close() error { + return nil +} + +// DiffGetter returns a FileGetCloser that can read files from the directory that +// contains files for the layer differences. Used for direct access for tar-split. +func (a *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) { + p := path.Join(a.rootPath(), "diff", id) + return fileGetNilCloser{storage.NewPathFileGetter(p)}, nil +} + +func (a *Driver) applyDiff(id string, idMappings *idtools.IDMappings, diff io.Reader) error { + if idMappings == nil { + idMappings = &idtools.IDMappings{} + } + return chrootarchive.UntarUncompressed(diff, path.Join(a.rootPath(), "diff", id), &archive.TarOptions{ + UIDMaps: idMappings.UIDs(), + GIDMaps: idMappings.GIDs(), + }) +} + +// DiffSize calculates the changes between the specified id +// and its parent and returns the size in bytes of the changes +// relative to its base filesystem directory. +func (a *Driver) DiffSize(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (size int64, err error) { + if !a.isParent(id, parent) { + return a.naiveDiff.DiffSize(id, idMappings, parent, parentMappings, mountLabel) + } + // AUFS doesn't need the parent layer to calculate the diff size. + return directory.Size(path.Join(a.rootPath(), "diff", id)) +} + +// ApplyDiff extracts the changeset from the given diff into the +// layer with the specified id and parent, returning the size of the +// new layer in bytes. +func (a *Driver) ApplyDiff(id string, idMappings *idtools.IDMappings, parent, mountLabel string, diff io.Reader) (size int64, err error) { + if !a.isParent(id, parent) { + return a.naiveDiff.ApplyDiff(id, idMappings, parent, mountLabel, diff) + } + + // AUFS doesn't need the parent id to apply the diff if it is the direct parent. 
+ if err = a.applyDiff(id, idMappings, diff); err != nil { + return + } + + return directory.Size(path.Join(a.rootPath(), "diff", id)) +} + +// Changes produces a list of changes between the specified layer +// and its parent layer. If parent is "", then all changes will be ADD changes. +func (a *Driver) Changes(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) ([]archive.Change, error) { + if !a.isParent(id, parent) { + return a.naiveDiff.Changes(id, idMappings, parent, parentMappings, mountLabel) + } + + // AUFS doesn't have snapshots, so we need to get changes from all parent + // layers. + layers, err := a.getParentLayerPaths(id) + if err != nil { + return nil, err + } + return archive.Changes(layers, path.Join(a.rootPath(), "diff", id)) +} + +func (a *Driver) getParentLayerPaths(id string) ([]string, error) { + parentIds, err := getParentIDs(a.rootPath(), id) + if err != nil { + return nil, err + } + layers := make([]string, len(parentIds)) + + // Get the diff paths for all the parent ids + for i, p := range parentIds { + layers[i] = path.Join(a.rootPath(), "diff", p) + } + return layers, nil +} + +func (a *Driver) mount(id string, target string, layers []string, options graphdriver.MountOpts) error { + a.Lock() + defer a.Unlock() + + // If the id is mounted or we get an error return + if mounted, err := a.mounted(target); err != nil || mounted { + return err + } + + rw := a.getDiffPath(id) + + if err := a.aufsMount(layers, rw, target, options); err != nil { + return fmt.Errorf("error creating aufs mount to %s: %v", target, err) + } + return nil +} + +func (a *Driver) unmount(mountPath string) error { + a.Lock() + defer a.Unlock() + + if mounted, err := a.mounted(mountPath); err != nil || !mounted { + return err + } + if err := Unmount(mountPath); err != nil { + return err + } + return nil +} + +func (a *Driver) mounted(mountpoint string) (bool, error) { + return graphdriver.Mounted(graphdriver.FsMagicAufs, mountpoint) +} + +// Cleanup aufs and unmount all mountpoints +func (a *Driver) Cleanup() error { + var dirs []string + if err := filepath.Walk(a.mntPath(), func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if !info.IsDir() { + return nil + } + dirs = append(dirs, path) + return nil + }); err != nil { + return err + } + + for _, m := range dirs { + if err := a.unmount(m); err != nil { + logrus.Debugf("aufs error unmounting %s: %s", m, err) + } + } + return mountpk.Unmount(a.root) +} + +func (a *Driver) aufsMount(ro []string, rw, target string, options graphdriver.MountOpts) (err error) { + defer func() { + if err != nil { + Unmount(target) + } + }() + + // Mount options are clipped to page size(4096 bytes). If there are more + // layers then these are remounted individually using append. 
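+	// The 54-byte offset below is headroom for the fixed parts of the option
+	// string (the rw-branch prefix and the xino options); the exact value is
+	// inherited from upstream rather than derived here.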
+ + offset := 54 + if useDirperm() { + offset += len(",dirperm1") + } + b := make([]byte, unix.Getpagesize()-len(options.MountLabel)-offset) // room for xino & mountLabel + bp := copy(b, fmt.Sprintf("br:%s=rw", rw)) + + index := 0 + for ; index < len(ro); index++ { + layer := fmt.Sprintf(":%s=ro+wh", ro[index]) + if bp+len(layer) > len(b) { + break + } + bp += copy(b[bp:], layer) + } + + opts := "dio,xino=/dev/shm/aufs.xino" + mountOptions := a.mountOptions + if len(options.Options) > 0 { + mountOptions = strings.Join(options.Options, ",") + } + if mountOptions != "" { + opts += fmt.Sprintf(",%s", mountOptions) + } + + if useDirperm() { + opts += ",dirperm1" + } + data := label.FormatMountLabel(fmt.Sprintf("%s,%s", string(b[:bp]), opts), options.MountLabel) + if err = mount("none", target, "aufs", 0, data); err != nil { + return + } + + for ; index < len(ro); index++ { + layer := fmt.Sprintf(":%s=ro+wh", ro[index]) + data := label.FormatMountLabel(fmt.Sprintf("append%s", layer), options.MountLabel) + if err = mount("none", target, "aufs", unix.MS_REMOUNT, data); err != nil { + return + } + } + + return +} + +// useDirperm checks dirperm1 mount option can be used with the current +// version of aufs. +func useDirperm() bool { + enableDirpermLock.Do(func() { + base, err := ioutil.TempDir("", "storage-aufs-base") + if err != nil { + logrus.Errorf("error checking dirperm1: %v", err) + return + } + defer os.RemoveAll(base) + + union, err := ioutil.TempDir("", "storage-aufs-union") + if err != nil { + logrus.Errorf("error checking dirperm1: %v", err) + return + } + defer os.RemoveAll(union) + + opts := fmt.Sprintf("br:%s,dirperm1,xino=/dev/shm/aufs.xino", base) + if err := mount("none", union, "aufs", 0, opts); err != nil { + return + } + enableDirperm = true + if err := Unmount(union); err != nil { + logrus.Errorf("error checking dirperm1: failed to unmount %v", err) + } + }) + return enableDirperm +} + +// UpdateLayerIDMap updates ID mappings in a layer from matching the ones +// specified by toContainer to those specified by toHost. +func (a *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) error { + return fmt.Errorf("aufs doesn't support changing ID mappings") +} + +// SupportsShifting tells whether the driver support shifting of the UIDs/GIDs in an userNS +func (a *Driver) SupportsShifting() bool { + return false +} diff --git a/vendor/github.com/containers/storage/drivers/aufs/dirs.go b/vendor/github.com/containers/storage/drivers/aufs/dirs.go new file mode 100644 index 0000000..d2325fc --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/aufs/dirs.go @@ -0,0 +1,64 @@ +// +build linux + +package aufs + +import ( + "bufio" + "io/ioutil" + "os" + "path" +) + +// Return all the directories +func loadIds(root string) ([]string, error) { + dirs, err := ioutil.ReadDir(root) + if err != nil { + return nil, err + } + out := []string{} + for _, d := range dirs { + if !d.IsDir() { + out = append(out, d.Name()) + } + } + return out, nil +} + +// Read the layers file for the current id and return all the +// layers represented by new lines in the file +// +// If there are no lines in the file then the id has no parent +// and an empty slice is returned. 
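+//
+// The first line names the layer's direct parent; any remaining lines list
+// the rest of the ancestor chain, nearest parent first.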
+func getParentIDs(root, id string) ([]string, error) { + f, err := os.Open(path.Join(root, "layers", id)) + if err != nil { + return nil, err + } + defer f.Close() + + out := []string{} + s := bufio.NewScanner(f) + + for s.Scan() { + if t := s.Text(); t != "" { + out = append(out, s.Text()) + } + } + return out, s.Err() +} + +func (a *Driver) getMountpoint(id string) string { + return path.Join(a.mntPath(), id) +} + +func (a *Driver) mntPath() string { + return path.Join(a.rootPath(), "mnt") +} + +func (a *Driver) getDiffPath(id string) string { + return path.Join(a.diffPath(), id) +} + +func (a *Driver) diffPath() string { + return path.Join(a.rootPath(), "diff") +} diff --git a/vendor/github.com/containers/storage/drivers/aufs/mount.go b/vendor/github.com/containers/storage/drivers/aufs/mount.go new file mode 100644 index 0000000..100e753 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/aufs/mount.go @@ -0,0 +1,21 @@ +// +build linux + +package aufs + +import ( + "os/exec" + + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +// Unmount the target specified. +func Unmount(target string) error { + if err := exec.Command("auplink", target, "flush").Run(); err != nil { + logrus.Warnf("Couldn't run auplink before unmount %s: %s", target, err) + } + if err := unix.Unmount(target, 0); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/containers/storage/drivers/aufs/mount_linux.go b/vendor/github.com/containers/storage/drivers/aufs/mount_linux.go new file mode 100644 index 0000000..937104b --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/aufs/mount_linux.go @@ -0,0 +1,7 @@ +package aufs + +import "golang.org/x/sys/unix" + +func mount(source string, target string, fstype string, flags uintptr, data string) error { + return unix.Mount(source, target, fstype, flags, data) +} diff --git a/vendor/github.com/containers/storage/drivers/aufs/mount_unsupported.go b/vendor/github.com/containers/storage/drivers/aufs/mount_unsupported.go new file mode 100644 index 0000000..d030b06 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/aufs/mount_unsupported.go @@ -0,0 +1,12 @@ +// +build !linux + +package aufs + +import "errors" + +// MsRemount declared to specify a non-linux system mount. 
+const MsRemount = 0
+
+func mount(source string, target string, fstype string, flags uintptr, data string) (err error) {
+	return errors.New("mount is not implemented on this platform")
+}
diff --git a/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go
new file mode 100644
index 0000000..30254d9
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go
@@ -0,0 +1,687 @@
+// +build linux
+
+package btrfs
+
+/*
+#include <stdlib.h>
+#include <dirent.h>
+#include <btrfs/ioctl.h>
+#include <btrfs/ctree.h>
+
+static void set_name_btrfs_ioctl_vol_args_v2(struct btrfs_ioctl_vol_args_v2* btrfs_struct, const char* value) {
+	snprintf(btrfs_struct->name, BTRFS_SUBVOL_NAME_MAX, "%s", value);
+}
+*/
+import "C"
+
+import (
+	"fmt"
+	"io/ioutil"
+	"math"
+	"os"
+	"path"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"sync"
+	"unsafe"
+
+	"github.com/containers/storage/drivers"
+	"github.com/containers/storage/pkg/idtools"
+	"github.com/containers/storage/pkg/mount"
+	"github.com/containers/storage/pkg/parsers"
+	"github.com/containers/storage/pkg/system"
+	"github.com/docker/go-units"
+	"github.com/opencontainers/selinux/go-selinux/label"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+	"golang.org/x/sys/unix"
+)
+
+func init() {
+	graphdriver.Register("btrfs", Init)
+}
+
+type btrfsOptions struct {
+	minSpace uint64
+	size     uint64
+}
+
+// Init returns a new BTRFS driver.
+// An error is returned if BTRFS is not supported.
+func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
+
+	fsMagic, err := graphdriver.GetFSMagic(home)
+	if err != nil {
+		return nil, err
+	}
+
+	if fsMagic != graphdriver.FsMagicBtrfs {
+		return nil, errors.Wrapf(graphdriver.ErrPrerequisites, "%q is not on a btrfs filesystem", home)
+	}
+
+	rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
+	if err != nil {
+		return nil, err
+	}
+	if err := idtools.MkdirAllAs(home, 0700, rootUID, rootGID); err != nil {
+		return nil, err
+	}
+
+	if err := mount.MakePrivate(home); err != nil {
+		return nil, err
+	}
+
+	opt, userDiskQuota, err := parseOptions(options)
+	if err != nil {
+		return nil, err
+	}
+
+	driver := &Driver{
+		home:    home,
+		uidMaps: uidMaps,
+		gidMaps: gidMaps,
+		options: opt,
+	}
+
+	if userDiskQuota {
+		if err := driver.subvolEnableQuota(); err != nil {
+			return nil, err
+		}
+	}
+
+	return graphdriver.NewNaiveDiffDriver(driver, graphdriver.NewNaiveLayerIDMapUpdater(driver)), nil
+}
+
+func parseOptions(opt []string) (btrfsOptions, bool, error) {
+	var options btrfsOptions
+	userDiskQuota := false
+	for _, option := range opt {
+		key, val, err := parsers.ParseKeyValueOpt(option)
+		if err != nil {
+			return options, userDiskQuota, err
+		}
+		key = strings.ToLower(key)
+		switch key {
+		case "btrfs.min_space":
+			minSpace, err := units.RAMInBytes(val)
+			if err != nil {
+				return options, userDiskQuota, err
+			}
+			userDiskQuota = true
+			options.minSpace = uint64(minSpace)
+		case "btrfs.mountopt":
+			return options, userDiskQuota, fmt.Errorf("btrfs driver does not support mount options")
+		default:
+			return options, userDiskQuota, fmt.Errorf("Unknown option %s", key)
+		}
+	}
+	return options, userDiskQuota, nil
+}
+
+// Driver contains information about the filesystem mounted.
+type Driver struct {
+	//root of the file system
+	home         string
+	uidMaps      []idtools.IDMap
+	gidMaps      []idtools.IDMap
+	options      btrfsOptions
+	quotaEnabled bool
+	once         sync.Once
+}
+
+// String prints the name of the driver (btrfs).
+func (d *Driver) String() string { + return "btrfs" +} + +// Status returns current driver information in a two dimensional string array. +// Output contains "Build Version" and "Library Version" of the btrfs libraries used. +// Version information can be used to check compatibility with your kernel. +func (d *Driver) Status() [][2]string { + status := [][2]string{} + if bv := btrfsBuildVersion(); bv != "-" { + status = append(status, [2]string{"Build Version", bv}) + } + if lv := btrfsLibVersion(); lv != -1 { + status = append(status, [2]string{"Library Version", fmt.Sprintf("%d", lv)}) + } + return status +} + +// Metadata returns empty metadata for this driver. +func (d *Driver) Metadata(id string) (map[string]string, error) { + return nil, nil +} + +// Cleanup unmounts the home directory. +func (d *Driver) Cleanup() error { + if err := d.subvolDisableQuota(); err != nil { + return err + } + + return mount.Unmount(d.home) +} + +func free(p *C.char) { + C.free(unsafe.Pointer(p)) +} + +func openDir(path string) (*C.DIR, error) { + Cpath := C.CString(path) + defer free(Cpath) + + dir := C.opendir(Cpath) + if dir == nil { + return nil, fmt.Errorf("Can't open dir") + } + return dir, nil +} + +func closeDir(dir *C.DIR) { + if dir != nil { + C.closedir(dir) + } +} + +func getDirFd(dir *C.DIR) uintptr { + return uintptr(C.dirfd(dir)) +} + +func subvolCreate(path, name string) error { + dir, err := openDir(path) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_vol_args + for i, c := range []byte(name) { + args.name[i] = C.char(c) + } + + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SUBVOL_CREATE, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to create btrfs subvolume: %v", errno.Error()) + } + return nil +} + +func subvolSnapshot(src, dest, name string) error { + srcDir, err := openDir(src) + if err != nil { + return err + } + defer closeDir(srcDir) + + destDir, err := openDir(dest) + if err != nil { + return err + } + defer closeDir(destDir) + + var args C.struct_btrfs_ioctl_vol_args_v2 + args.fd = C.__s64(getDirFd(srcDir)) + + var cs = C.CString(name) + C.set_name_btrfs_ioctl_vol_args_v2(&args, cs) + C.free(unsafe.Pointer(cs)) + + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(destDir), C.BTRFS_IOC_SNAP_CREATE_V2, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to create btrfs snapshot: %v", errno.Error()) + } + return nil +} + +func isSubvolume(p string) (bool, error) { + var bufStat unix.Stat_t + if err := unix.Lstat(p, &bufStat); err != nil { + return false, err + } + + // return true if it is a btrfs subvolume + return bufStat.Ino == C.BTRFS_FIRST_FREE_OBJECTID, nil +} + +func subvolDelete(dirpath, name string, quotaEnabled bool) error { + dir, err := openDir(dirpath) + if err != nil { + return err + } + defer closeDir(dir) + fullPath := path.Join(dirpath, name) + + var args C.struct_btrfs_ioctl_vol_args + + // walk the btrfs subvolumes + walkSubvolumes := func(p string, f os.FileInfo, err error) error { + if err != nil { + if os.IsNotExist(err) && p != fullPath { + // missing most likely because the path was a subvolume that got removed in the previous iteration + // since it's gone anyway, we don't care + return nil + } + return fmt.Errorf("error walking subvolumes: %v", err) + } + // we want to check children only so skip itself + // it will be removed after the filepath walk anyways + if f.IsDir() && p != fullPath { + sv, err := isSubvolume(p) + 
if err != nil { + return fmt.Errorf("Failed to test if %s is a btrfs subvolume: %v", p, err) + } + if sv { + if err := subvolDelete(path.Dir(p), f.Name(), quotaEnabled); err != nil { + return fmt.Errorf("Failed to destroy btrfs child subvolume (%s) of parent (%s): %v", p, dirpath, err) + } + } + } + return nil + } + if err := filepath.Walk(path.Join(dirpath, name), walkSubvolumes); err != nil { + return fmt.Errorf("Recursively walking subvolumes for %s failed: %v", dirpath, err) + } + + if quotaEnabled { + if qgroupid, err := subvolLookupQgroup(fullPath); err == nil { + var args C.struct_btrfs_ioctl_qgroup_create_args + args.qgroupid = C.__u64(qgroupid) + + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QGROUP_CREATE, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + logrus.Errorf("Failed to delete btrfs qgroup %v for %s: %v", qgroupid, fullPath, errno.Error()) + } + } else { + logrus.Errorf("Failed to lookup btrfs qgroup for %s: %v", fullPath, err.Error()) + } + } + + // all subvolumes have been removed + // now remove the one originally passed in + for i, c := range []byte(name) { + args.name[i] = C.char(c) + } + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SNAP_DESTROY, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to destroy btrfs snapshot %s for %s: %v", dirpath, name, errno.Error()) + } + return nil +} + +func (d *Driver) updateQuotaStatus() { + d.once.Do(func() { + if !d.quotaEnabled { + // In case quotaEnabled is not set, check qgroup and update quotaEnabled as needed + if err := subvolQgroupStatus(d.home); err != nil { + // quota is still not enabled + return + } + d.quotaEnabled = true + } + }) +} + +func (d *Driver) subvolEnableQuota() error { + d.updateQuotaStatus() + + if d.quotaEnabled { + return nil + } + + dir, err := openDir(d.home) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_quota_ctl_args + args.cmd = C.BTRFS_QUOTA_CTL_ENABLE + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_CTL, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to enable btrfs quota for %s: %v", dir, errno.Error()) + } + + d.quotaEnabled = true + + return nil +} + +func (d *Driver) subvolDisableQuota() error { + d.updateQuotaStatus() + + if !d.quotaEnabled { + return nil + } + + dir, err := openDir(d.home) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_quota_ctl_args + args.cmd = C.BTRFS_QUOTA_CTL_DISABLE + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_CTL, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to disable btrfs quota for %s: %v", dir, errno.Error()) + } + + d.quotaEnabled = false + + return nil +} + +func (d *Driver) subvolRescanQuota() error { + d.updateQuotaStatus() + + if !d.quotaEnabled { + return nil + } + + dir, err := openDir(d.home) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_quota_rescan_args + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_RESCAN_WAIT, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to rescan btrfs quota for %s: %v", dir, errno.Error()) + } + + return nil +} + +func subvolLimitQgroup(path string, size uint64) error { + dir, err := openDir(path) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_qgroup_limit_args + 
args.lim.max_referenced = C.__u64(size) + args.lim.flags = C.BTRFS_QGROUP_LIMIT_MAX_RFER + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QGROUP_LIMIT, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to limit qgroup for %s: %v", dir, errno.Error()) + } + + return nil +} + +// subvolQgroupStatus performs a BTRFS_IOC_TREE_SEARCH on the root path +// with search key of BTRFS_QGROUP_STATUS_KEY. +// In case qgroup is enabled, the retuned key type will match BTRFS_QGROUP_STATUS_KEY. +// For more details please see https://github.com/kdave/btrfs-progs/blob/v4.9/qgroup.c#L1035 +func subvolQgroupStatus(path string) error { + dir, err := openDir(path) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_search_args + args.key.tree_id = C.BTRFS_QUOTA_TREE_OBJECTID + args.key.min_type = C.BTRFS_QGROUP_STATUS_KEY + args.key.max_type = C.BTRFS_QGROUP_STATUS_KEY + args.key.max_objectid = C.__u64(math.MaxUint64) + args.key.max_offset = C.__u64(math.MaxUint64) + args.key.max_transid = C.__u64(math.MaxUint64) + args.key.nr_items = 4096 + + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_TREE_SEARCH, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to search qgroup for %s: %v", path, errno.Error()) + } + sh := (*C.struct_btrfs_ioctl_search_header)(unsafe.Pointer(&args.buf)) + if sh._type != C.BTRFS_QGROUP_STATUS_KEY { + return fmt.Errorf("Invalid qgroup search header type for %s: %v", path, sh._type) + } + return nil +} + +func subvolLookupQgroup(path string) (uint64, error) { + dir, err := openDir(path) + if err != nil { + return 0, err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_ino_lookup_args + args.objectid = C.BTRFS_FIRST_FREE_OBJECTID + + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_INO_LOOKUP, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return 0, fmt.Errorf("Failed to lookup qgroup for %s: %v", dir, errno.Error()) + } + if args.treeid == 0 { + return 0, fmt.Errorf("Invalid qgroup id for %s: 0", dir) + } + + return uint64(args.treeid), nil +} + +func (d *Driver) subvolumesDir() string { + return path.Join(d.home, "subvolumes") +} + +func (d *Driver) subvolumesDirID(id string) string { + return path.Join(d.subvolumesDir(), id) +} + +func (d *Driver) quotasDir() string { + return path.Join(d.home, "quotas") +} + +func (d *Driver) quotasDirID(id string) string { + return path.Join(d.quotasDir(), id) +} + +// CreateFromTemplate creates a layer with the same contents and parent as another layer. +func (d *Driver) CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *graphdriver.CreateOpts, readWrite bool) error { + return d.Create(id, template, opts) +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. +func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + return d.Create(id, parent, opts) +} + +// Create the filesystem with given id. 
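+// A layer with no parent becomes a fresh subvolume; a layer with a parent is
+// created as a btrfs snapshot of the parent's subvolume.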
+func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { + quotas := path.Join(d.home, "quotas") + subvolumes := path.Join(d.home, "subvolumes") + rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + return err + } + if err := idtools.MkdirAllAs(subvolumes, 0700, rootUID, rootGID); err != nil { + return err + } + if parent == "" { + if err := subvolCreate(subvolumes, id); err != nil { + return err + } + } else { + parentDir := d.subvolumesDirID(parent) + st, err := os.Stat(parentDir) + if err != nil { + return err + } + if !st.IsDir() { + return fmt.Errorf("%s: not a directory", parentDir) + } + if err := subvolSnapshot(parentDir, subvolumes, id); err != nil { + return err + } + } + + var storageOpt map[string]string + if opts != nil { + storageOpt = opts.StorageOpt + } + + if _, ok := storageOpt["size"]; ok { + driver := &Driver{} + if err := d.parseStorageOpt(storageOpt, driver); err != nil { + return err + } + + if err := d.setStorageSize(path.Join(subvolumes, id), driver); err != nil { + return err + } + if err := idtools.MkdirAllAs(quotas, 0700, rootUID, rootGID); err != nil { + return err + } + if err := ioutil.WriteFile(path.Join(quotas, id), []byte(fmt.Sprint(driver.options.size)), 0644); err != nil { + return err + } + } + + // if we have a remapped root (user namespaces enabled), change the created snapshot + // dir ownership to match + if rootUID != 0 || rootGID != 0 { + if err := os.Chown(path.Join(subvolumes, id), rootUID, rootGID); err != nil { + return err + } + } + + mountLabel := "" + if opts != nil { + mountLabel = opts.MountLabel + } + + return label.Relabel(path.Join(subvolumes, id), mountLabel, false) +} + +// Parse btrfs storage options +func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) error { + // Read size to change the subvolume disk quota per container + for key, val := range storageOpt { + key := strings.ToLower(key) + switch key { + case "size": + size, err := units.RAMInBytes(val) + if err != nil { + return err + } + driver.options.size = uint64(size) + default: + return fmt.Errorf("Unknown option %s", key) + } + } + + return nil +} + +// Set btrfs storage size +func (d *Driver) setStorageSize(dir string, driver *Driver) error { + if driver.options.size <= 0 { + return fmt.Errorf("btrfs: invalid storage size: %s", units.HumanSize(float64(driver.options.size))) + } + if d.options.minSpace > 0 && driver.options.size < d.options.minSpace { + return fmt.Errorf("btrfs: storage size cannot be less than %s", units.HumanSize(float64(d.options.minSpace))) + } + + if err := d.subvolEnableQuota(); err != nil { + return err + } + + if err := subvolLimitQgroup(dir, driver.options.size); err != nil { + return err + } + + return nil +} + +// Remove the filesystem with given id. +func (d *Driver) Remove(id string) error { + dir := d.subvolumesDirID(id) + if _, err := os.Stat(dir); err != nil { + return err + } + quotasDir := d.quotasDirID(id) + if _, err := os.Stat(quotasDir); err == nil { + if err := os.Remove(quotasDir); err != nil { + return err + } + } else if !os.IsNotExist(err) { + return err + } + + // Call updateQuotaStatus() to invoke status update + d.updateQuotaStatus() + + if err := subvolDelete(d.subvolumesDir(), id, d.quotaEnabled); err != nil { + return err + } + if err := system.EnsureRemoveAll(dir); err != nil { + return err + } + if err := d.subvolRescanQuota(); err != nil { + return err + } + return nil +} + +// Get the requested filesystem id. 
+func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
+	dir := d.subvolumesDirID(id)
+	st, err := os.Stat(dir)
+	if err != nil {
+		return "", err
+	}
+	if len(options.Options) > 0 {
+		return "", fmt.Errorf("btrfs driver does not support mount options")
+	}
+
+	if !st.IsDir() {
+		return "", fmt.Errorf("%s: not a directory", dir)
+	}
+
+	if quota, err := ioutil.ReadFile(d.quotasDirID(id)); err == nil {
+		if size, err := strconv.ParseUint(string(quota), 10, 64); err == nil && size >= d.options.minSpace {
+			if err := d.subvolEnableQuota(); err != nil {
+				return "", err
+			}
+			if err := subvolLimitQgroup(dir, size); err != nil {
+				return "", err
+			}
+		}
+	}
+
+	return dir, nil
+}
+
+// Put is not implemented for BTRFS as there is no cleanup required for the id.
+func (d *Driver) Put(id string) error {
+	// Get() creates no runtime resources (like e.g. mounts)
+	// so this doesn't need to do anything.
+	return nil
+}
+
+// Exists checks if the id exists in the filesystem.
+func (d *Driver) Exists(id string) bool {
+	dir := d.subvolumesDirID(id)
+	_, err := os.Stat(dir)
+	return err == nil
+}
+
+// AdditionalImageStores returns additional image stores supported by the driver
+func (d *Driver) AdditionalImageStores() []string {
+	return nil
+}
diff --git a/vendor/github.com/containers/storage/drivers/btrfs/dummy_unsupported.go b/vendor/github.com/containers/storage/drivers/btrfs/dummy_unsupported.go
new file mode 100644
index 0000000..f070888
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/btrfs/dummy_unsupported.go
@@ -0,0 +1,3 @@
+// +build !linux !cgo
+
+package btrfs
diff --git a/vendor/github.com/containers/storage/drivers/btrfs/version.go b/vendor/github.com/containers/storage/drivers/btrfs/version.go
new file mode 100644
index 0000000..73d90cd
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/btrfs/version.go
@@ -0,0 +1,26 @@
+// +build linux,!btrfs_noversion
+
+package btrfs
+
+/*
+#include <btrfs/version.h>
+
+// around version 3.16, they did not define lib version yet
+#ifndef BTRFS_LIB_VERSION
+#define BTRFS_LIB_VERSION -1
+#endif
+
+// upstream had removed it, but now it will be coming back
+#ifndef BTRFS_BUILD_VERSION
+#define BTRFS_BUILD_VERSION "-"
+#endif
+*/
+import "C"
+
+func btrfsBuildVersion() string {
+	return string(C.BTRFS_BUILD_VERSION)
+}
+
+func btrfsLibVersion() int {
+	return int(C.BTRFS_LIB_VERSION)
+}
diff --git a/vendor/github.com/containers/storage/drivers/btrfs/version_none.go b/vendor/github.com/containers/storage/drivers/btrfs/version_none.go
new file mode 100644
index 0000000..f802fbc
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/btrfs/version_none.go
@@ -0,0 +1,14 @@
+// +build linux,btrfs_noversion
+
+package btrfs
+
+// TODO(vbatts) remove this work-around once supported linux distros are on
+// btrfs utilities of >= 3.16.1
+
+func btrfsBuildVersion() string {
+	return "-"
+}
+
+func btrfsLibVersion() int {
+	return -1
+}
diff --git a/vendor/github.com/containers/storage/drivers/chown.go b/vendor/github.com/containers/storage/drivers/chown.go
new file mode 100644
index 0000000..f2f1ec3
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/chown.go
@@ -0,0 +1,137 @@
+package graphdriver
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"os"
+	"path/filepath"
+
+	"github.com/containers/storage/pkg/idtools"
+	"github.com/containers/storage/pkg/reexec"
+)
+
+const (
+	chownByMapsCmd = "storage-chown-by-maps"
+)
+
+func init() {
+	reexec.Register(chownByMapsCmd, chownByMapsMain)
+}
+
+func
chownByMapsMain() { + if len(os.Args) < 2 { + fmt.Fprintf(os.Stderr, "requires mapping configuration on stdin and directory path") + os.Exit(1) + } + // Read and decode our configuration. + discreteMaps := [4][]idtools.IDMap{} + config := bytes.Buffer{} + if _, err := config.ReadFrom(os.Stdin); err != nil { + fmt.Fprintf(os.Stderr, "error reading configuration: %v", err) + os.Exit(1) + } + if err := json.Unmarshal(config.Bytes(), &discreteMaps); err != nil { + fmt.Fprintf(os.Stderr, "error decoding configuration: %v", err) + os.Exit(1) + } + // Try to chroot. This may not be possible, and on some systems that + // means we just Chdir() to the directory, so from here on we should be + // using relative paths. + if err := chrootOrChdir(os.Args[1]); err != nil { + fmt.Fprintf(os.Stderr, "error chrooting to %q: %v", os.Args[1], err) + os.Exit(1) + } + // Build the mapping objects. + toContainer := idtools.NewIDMappingsFromMaps(discreteMaps[0], discreteMaps[1]) + if len(toContainer.UIDs()) == 0 && len(toContainer.GIDs()) == 0 { + toContainer = nil + } + toHost := idtools.NewIDMappingsFromMaps(discreteMaps[2], discreteMaps[3]) + if len(toHost.UIDs()) == 0 && len(toHost.GIDs()) == 0 { + toHost = nil + } + chown := func(path string, info os.FileInfo, err error) error { + if err != nil { + return fmt.Errorf("error walking to %q: %v", path, err) + } + if path == "." { + return nil + } + return platformLChown(path, info, toHost, toContainer) + } + if err := filepath.Walk(".", chown); err != nil { + fmt.Fprintf(os.Stderr, "error during chown: %v", err) + os.Exit(1) + } + os.Exit(0) +} + +// ChownPathByMaps walks the filesystem tree, changing the ownership +// information using the toContainer and toHost mappings, using them to replace +// on-disk owner UIDs and GIDs which are "host" values in the first map with +// UIDs and GIDs for "host" values from the second map which correspond to the +// same "container" IDs. +func ChownPathByMaps(path string, toContainer, toHost *idtools.IDMappings) error { + if toContainer == nil { + toContainer = &idtools.IDMappings{} + } + if toHost == nil { + toHost = &idtools.IDMappings{} + } + + config, err := json.Marshal([4][]idtools.IDMap{toContainer.UIDs(), toContainer.GIDs(), toHost.UIDs(), toHost.GIDs()}) + if err != nil { + return err + } + cmd := reexec.Command(chownByMapsCmd, path) + cmd.Stdin = bytes.NewReader(config) + output, err := cmd.CombinedOutput() + if len(output) > 0 && err != nil { + return fmt.Errorf("%v: %s", err, string(output)) + } + if err != nil { + return err + } + if len(output) > 0 { + return fmt.Errorf("%s", string(output)) + } + + return nil +} + +type naiveLayerIDMapUpdater struct { + ProtoDriver +} + +// NewNaiveLayerIDMapUpdater wraps the ProtoDriver in a LayerIDMapUpdater that +// uses ChownPathByMaps to update the ownerships in a layer's filesystem tree. +func NewNaiveLayerIDMapUpdater(driver ProtoDriver) LayerIDMapUpdater { + return &naiveLayerIDMapUpdater{ProtoDriver: driver} +} + +// UpdateLayerIDMap walks the layer's filesystem tree, changing the ownership +// information using the toContainer and toHost mappings, using them to replace +// on-disk owner UIDs and GIDs which are "host" values in the first map with +// UIDs and GIDs for "host" values from the second map which correspond to the +// same "container" IDs. 
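+// The layer is mounted via Get, re-owned in place with ChownPathByMaps, and
+// then released again by the deferred Put.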
+func (n *naiveLayerIDMapUpdater) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) error { + driver := n.ProtoDriver + options := MountOpts{ + MountLabel: mountLabel, + } + layerFs, err := driver.Get(id, options) + if err != nil { + return err + } + defer func() { + driver.Put(id) + }() + + return ChownPathByMaps(layerFs, toContainer, toHost) +} + +// SupportsShifting tells whether the driver support shifting of the UIDs/GIDs in an userNS +func (n *naiveLayerIDMapUpdater) SupportsShifting() bool { + return false +} diff --git a/vendor/github.com/containers/storage/drivers/chown_unix.go b/vendor/github.com/containers/storage/drivers/chown_unix.go new file mode 100644 index 0000000..51d6d75 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/chown_unix.go @@ -0,0 +1,77 @@ +// +build !windows + +package graphdriver + +import ( + "fmt" + "os" + "syscall" + + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/system" +) + +func platformLChown(path string, info os.FileInfo, toHost, toContainer *idtools.IDMappings) error { + sysinfo := info.Sys() + if st, ok := sysinfo.(*syscall.Stat_t); ok { + // Map an on-disk UID/GID pair from host to container + // using the first map, then back to the host using the + // second map. Skip that first step if they're 0, to + // compensate for cases where a parent layer should + // have had a mapped value, but didn't. + uid, gid := int(st.Uid), int(st.Gid) + if toContainer != nil { + pair := idtools.IDPair{ + UID: uid, + GID: gid, + } + mappedUid, mappedGid, err := toContainer.ToContainer(pair) + if err != nil { + if (uid != 0) || (gid != 0) { + return fmt.Errorf("error mapping host ID pair %#v for %q to container: %v", pair, path, err) + } + mappedUid, mappedGid = uid, gid + } + uid, gid = mappedUid, mappedGid + } + if toHost != nil { + pair := idtools.IDPair{ + UID: uid, + GID: gid, + } + mappedPair, err := toHost.ToHost(pair) + if err != nil { + return fmt.Errorf("error mapping container ID pair %#v for %q to host: %v", pair, path, err) + } + uid, gid = mappedPair.UID, mappedPair.GID + } + if uid != int(st.Uid) || gid != int(st.Gid) { + stat, err := os.Lstat(path) + if err != nil { + return fmt.Errorf("%s: lstat(%q): %v", os.Args[0], path, err) + } + cap, err := system.Lgetxattr(path, "security.capability") + if err != nil && err != system.ErrNotSupportedPlatform { + return fmt.Errorf("%s: Lgetxattr(%q): %v", os.Args[0], path, err) + } + + // Make the change. + if err := syscall.Lchown(path, uid, gid); err != nil { + return fmt.Errorf("%s: chown(%q): %v", os.Args[0], path, err) + } + // Restore the SUID and SGID bits if they were originally set. 
+			if (stat.Mode()&os.ModeSymlink == 0) && stat.Mode()&(os.ModeSetuid|os.ModeSetgid) != 0 {
+				if err := os.Chmod(path, stat.Mode()); err != nil {
+					return fmt.Errorf("%s: chmod(%q): %v", os.Args[0], path, err)
+				}
+			}
+			if cap != nil {
+				if err := system.Lsetxattr(path, "security.capability", cap, 0); err != nil {
+					return fmt.Errorf("%s: Lsetxattr(%q): %v", os.Args[0], path, err)
+				}
+			}
+
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/containers/storage/drivers/chown_windows.go b/vendor/github.com/containers/storage/drivers/chown_windows.go
new file mode 100644
index 0000000..31bd5bb
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/chown_windows.go
@@ -0,0 +1,14 @@
+// +build windows
+
+package graphdriver
+
+import (
+	"os"
+	"syscall"
+
+	"github.com/containers/storage/pkg/idtools"
+)
+
+func platformLChown(path string, info os.FileInfo, toHost, toContainer *idtools.IDMappings) error {
+	return &os.PathError{"lchown", path, syscall.EWINDOWS}
+}
diff --git a/vendor/github.com/containers/storage/drivers/chroot_unix.go b/vendor/github.com/containers/storage/drivers/chroot_unix.go
new file mode 100644
index 0000000..c8c4905
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/chroot_unix.go
@@ -0,0 +1,21 @@
+// +build linux darwin freebsd solaris
+
+package graphdriver
+
+import (
+	"fmt"
+	"os"
+	"syscall"
+)
+
+// chrootOrChdir() is either a chdir() to the specified path, or a chroot() to the
+// specified path followed by chdir() to the new root directory
+func chrootOrChdir(path string) error {
+	if err := syscall.Chroot(path); err != nil {
+		return fmt.Errorf("error chrooting to %q: %v", path, err)
+	}
+	if err := syscall.Chdir(string(os.PathSeparator)); err != nil {
+		return fmt.Errorf("error changing to %q: %v", path, err)
+	}
+	return nil
+}
diff --git a/vendor/github.com/containers/storage/drivers/chroot_windows.go b/vendor/github.com/containers/storage/drivers/chroot_windows.go
new file mode 100644
index 0000000..f4dc22a
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/chroot_windows.go
@@ -0,0 +1,15 @@
+package graphdriver
+
+import (
+	"fmt"
+	"syscall"
+)
+
+// chrootOrChdir() is either a chdir() to the specified path, or a chroot() to the
+// specified path followed by chdir() to the new root directory
+func chrootOrChdir(path string) error {
+	if err := syscall.Chdir(path); err != nil {
+		return fmt.Errorf("error changing to %q: %v", path, err)
+	}
+	return nil
+}
diff --git a/vendor/github.com/containers/storage/drivers/copy/copy_linux.go b/vendor/github.com/containers/storage/drivers/copy/copy_linux.go
new file mode 100644
index 0000000..d614b78
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/copy/copy_linux.go
@@ -0,0 +1,279 @@
+// +build cgo
+
+package copy
+
+/*
+#include <linux/fs.h>
+
+#ifndef FICLONE
+#define FICLONE _IOW(0x94, 9, int)
+#endif
+*/
+import "C"
+import (
+	"container/list"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"syscall"
+	"time"
+
+	"github.com/containers/storage/pkg/idtools"
+	"github.com/containers/storage/pkg/pools"
+	"github.com/containers/storage/pkg/system"
+	rsystem "github.com/opencontainers/runc/libcontainer/system"
+	"golang.org/x/sys/unix"
+)
+
+// Mode indicates whether to use hardlink or copy content
+type Mode int
+
+const (
+	// Content creates a new file, and copies the content of the file
+	Content Mode = iota
+	// Hardlink creates a new hardlink to the existing file
+	Hardlink
+)
+
+func copyRegular(srcPath, dstPath string, fileinfo os.FileInfo, copyWithFileRange, 
copyWithFileClone *bool) error { + srcFile, err := os.Open(srcPath) + if err != nil { + return err + } + defer srcFile.Close() + + // If the destination file already exists, we shouldn't blow it away + dstFile, err := os.OpenFile(dstPath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, fileinfo.Mode()) + if err != nil { + return err + } + defer dstFile.Close() + + if *copyWithFileClone { + _, _, err = unix.Syscall(unix.SYS_IOCTL, dstFile.Fd(), C.FICLONE, srcFile.Fd()) + if err == nil { + return nil + } + + *copyWithFileClone = false + if err == unix.EXDEV { + *copyWithFileRange = false + } + } + if *copyWithFileRange { + err = doCopyWithFileRange(srcFile, dstFile, fileinfo) + // Trying the file_clone may not have caught the exdev case + // as the ioctl may not have been available (therefore EINVAL) + if err == unix.EXDEV || err == unix.ENOSYS { + *copyWithFileRange = false + } else { + return err + } + } + return legacyCopy(srcFile, dstFile) +} + +func doCopyWithFileRange(srcFile, dstFile *os.File, fileinfo os.FileInfo) error { + amountLeftToCopy := fileinfo.Size() + + for amountLeftToCopy > 0 { + n, err := unix.CopyFileRange(int(srcFile.Fd()), nil, int(dstFile.Fd()), nil, int(amountLeftToCopy), 0) + if err != nil { + return err + } + + amountLeftToCopy = amountLeftToCopy - int64(n) + } + + return nil +} + +func legacyCopy(srcFile io.Reader, dstFile io.Writer) error { + _, err := pools.Copy(dstFile, srcFile) + + return err +} + +func copyXattr(srcPath, dstPath, attr string) error { + data, err := system.Lgetxattr(srcPath, attr) + if err != nil { + return err + } + if data != nil { + if err := system.Lsetxattr(dstPath, attr, data, 0); err != nil { + return err + } + } + return nil +} + +type fileID struct { + dev uint64 + ino uint64 +} + +type dirMtimeInfo struct { + dstPath *string + stat *syscall.Stat_t +} + +// DirCopy copies or hardlinks the contents of one directory to another, +// properly handling xattrs, and soft links +// +// Copying xattrs can be opted out of by passing false for copyXattrs. 
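copyRegular above tries the cheapest copy mechanism first and degrades gracefully: a FICLONE ioctl (a reflink, essentially free on btrfs/XFS), then copy_file_range(2), then a plain read/write loop, with the two *bool flags remembering which mechanisms the kernel or filesystem has already refused so later files skip the failed paths. Callers see none of that; a typical DirCopy invocation looks like this sketch (the paths are hypothetical):

package main

import (
	"log"

	"github.com/containers/storage/drivers/copy"
)

func main() {
	// Duplicate a layer tree, preserving xattrs such as
	// security.capability and trusted.overlay.opaque.
	if err := copy.DirCopy("/var/lib/demo/src", "/var/lib/demo/dst", copy.Content, true); err != nil {
		log.Fatal(err)
	}

	// Hardlink mode links regular files into place instead of copying;
	// near-free, but both trees then share the same inodes.
	if err := copy.DirCopy("/var/lib/demo/src", "/var/lib/demo/dst2", copy.Hardlink, false); err != nil {
		log.Fatal(err)
	}
}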
+func DirCopy(srcDir, dstDir string, copyMode Mode, copyXattrs bool) error { + copyWithFileRange := true + copyWithFileClone := true + + // This is a map of source file inodes to dst file paths + copiedFiles := make(map[fileID]string) + + dirsToSetMtimes := list.New() + err := filepath.Walk(srcDir, func(srcPath string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + relPath, err := filepath.Rel(srcDir, srcPath) + if err != nil { + return err + } + + dstPath := filepath.Join(dstDir, relPath) + if err != nil { + return err + } + + stat, ok := f.Sys().(*syscall.Stat_t) + if !ok { + return fmt.Errorf("Unable to get raw syscall.Stat_t data for %s", srcPath) + } + + isHardlink := false + + switch mode := f.Mode(); { + case mode.IsRegular(): + id := fileID{dev: stat.Dev, ino: stat.Ino} + if copyMode == Hardlink { + isHardlink = true + if err2 := os.Link(srcPath, dstPath); err2 != nil { + return err2 + } + } else if hardLinkDstPath, ok := copiedFiles[id]; ok { + if err2 := os.Link(hardLinkDstPath, dstPath); err2 != nil { + return err2 + } + } else { + if err2 := copyRegular(srcPath, dstPath, f, ©WithFileRange, ©WithFileClone); err2 != nil { + return err2 + } + copiedFiles[id] = dstPath + } + + case mode.IsDir(): + if err := os.Mkdir(dstPath, f.Mode()); err != nil && !os.IsExist(err) { + return err + } + + case mode&os.ModeSymlink != 0: + link, err := os.Readlink(srcPath) + if err != nil { + return err + } + + if err := os.Symlink(link, dstPath); err != nil { + return err + } + + case mode&os.ModeNamedPipe != 0: + fallthrough + + case mode&os.ModeSocket != 0: + if err := unix.Mkfifo(dstPath, stat.Mode); err != nil { + return err + } + + case mode&os.ModeDevice != 0: + if rsystem.RunningInUserNS() { + // cannot create a device if running in user namespace + return nil + } + if err := unix.Mknod(dstPath, stat.Mode, int(stat.Rdev)); err != nil { + return err + } + + default: + return fmt.Errorf("unknown file type with mode %v for %s", mode, srcPath) + } + + // Everything below is copying metadata from src to dst. All this metadata + // already shares an inode for hardlinks. + if isHardlink { + return nil + } + + if err := idtools.SafeLchown(dstPath, int(stat.Uid), int(stat.Gid)); err != nil { + return err + } + + if copyXattrs { + if err := doCopyXattrs(srcPath, dstPath); err != nil { + return err + } + } + + isSymlink := f.Mode()&os.ModeSymlink != 0 + + // There is no LChmod, so ignore mode for symlink. 
Also, this + // must happen after chown, as that can modify the file mode + if !isSymlink { + if err := os.Chmod(dstPath, f.Mode()); err != nil { + return err + } + } + + // system.Chtimes doesn't support a NOFOLLOW flag atm + // nolint: unconvert + if f.IsDir() { + dirsToSetMtimes.PushFront(&dirMtimeInfo{dstPath: &dstPath, stat: stat}) + } else if !isSymlink { + aTime := time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) + mTime := time.Unix(int64(stat.Mtim.Sec), int64(stat.Mtim.Nsec)) + if err := system.Chtimes(dstPath, aTime, mTime); err != nil { + return err + } + } else { + ts := []syscall.Timespec{stat.Atim, stat.Mtim} + if err := system.LUtimesNano(dstPath, ts); err != nil { + return err + } + } + return nil + }) + if err != nil { + return err + } + for e := dirsToSetMtimes.Front(); e != nil; e = e.Next() { + mtimeInfo := e.Value.(*dirMtimeInfo) + ts := []syscall.Timespec{mtimeInfo.stat.Atim, mtimeInfo.stat.Mtim} + if err := system.LUtimesNano(*mtimeInfo.dstPath, ts); err != nil { + return err + } + } + + return nil +} + +func doCopyXattrs(srcPath, dstPath string) error { + if err := copyXattr(srcPath, dstPath, "security.capability"); err != nil { + return err + } + + // We need to copy this attribute if it appears in an overlay upper layer, as + // this function is used to copy those. It is set by overlay if a directory + // is removed and then re-created and should not inherit anything from the + // same dir in the lower dir. + return copyXattr(srcPath, dstPath, "trusted.overlay.opaque") +} diff --git a/vendor/github.com/containers/storage/drivers/copy/copy_unsupported.go b/vendor/github.com/containers/storage/drivers/copy/copy_unsupported.go new file mode 100644 index 0000000..4d44f2f --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/copy/copy_unsupported.go @@ -0,0 +1,19 @@ +// +build !linux !cgo + +package copy + +import "github.com/containers/storage/pkg/chrootarchive" + +// Mode indicates whether to use hardlink or copy content +type Mode int + +const ( + // Content creates a new file, and copies the content of the file + Content Mode = iota +) + +// DirCopy copies or hardlinks the contents of one directory to another, +// properly handling soft links +func DirCopy(srcDir, dstDir string, _ Mode, _ bool) error { + return chrootarchive.NewArchiver(nil).CopyWithTar(srcDir, dstDir) +} diff --git a/vendor/github.com/containers/storage/drivers/counter.go b/vendor/github.com/containers/storage/drivers/counter.go new file mode 100644 index 0000000..72551a3 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/counter.go @@ -0,0 +1,59 @@ +package graphdriver + +import "sync" + +type minfo struct { + check bool + count int +} + +// RefCounter is a generic counter for use by graphdriver Get/Put calls +type RefCounter struct { + counts map[string]*minfo + mu sync.Mutex + checker Checker +} + +// NewRefCounter returns a new RefCounter +func NewRefCounter(c Checker) *RefCounter { + return &RefCounter{ + checker: c, + counts: make(map[string]*minfo), + } +} + +// Increment increases the ref count for the given id and returns the current count +func (c *RefCounter) Increment(path string) int { + return c.incdec(path, func(minfo *minfo) { + minfo.count++ + }) +} + +// Decrement decreases the ref count for the given id and returns the current count +func (c *RefCounter) Decrement(path string) int { + return c.incdec(path, func(minfo *minfo) { + minfo.count-- + }) +} + +func (c *RefCounter) incdec(path string, infoOp func(minfo *minfo)) int { + c.mu.Lock() + m := 
c.counts[path] + if m == nil { + m = &minfo{} + c.counts[path] = m + } + // if we are checking this path for the first time check to make sure + // if it was already mounted on the system and make sure we have a correct ref + // count if it is mounted as it is in use. + if !m.check { + m.check = true + if c.checker.IsMounted(path) { + m.count++ + } + } + infoOp(m) + count := m.count + c.mu.Unlock() + return count +} diff --git a/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go b/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go new file mode 100644 index 0000000..f638452 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go @@ -0,0 +1,239 @@ +package devmapper + +import ( + "bufio" + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "strings" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +type directLVMConfig struct { + Device string + ThinpPercent uint64 + ThinpMetaPercent uint64 + AutoExtendPercent uint64 + AutoExtendThreshold uint64 +} + +var ( + errThinpPercentMissing = errors.New("must set both `dm.thinp_percent` and `dm.thinp_metapercent` if either is specified") + errThinpPercentTooBig = errors.New("combined `dm.thinp_percent` and `dm.thinp_metapercent` must not be greater than 100") + errMissingSetupDevice = errors.New("must provide device path in `dm.directlvm_device` in order to configure direct-lvm") +) + +func validateLVMConfig(cfg directLVMConfig) error { + if cfg.Device == "" { + return errMissingSetupDevice + } + if (cfg.ThinpPercent > 0 && cfg.ThinpMetaPercent == 0) || cfg.ThinpMetaPercent > 0 && cfg.ThinpPercent == 0 { + return errThinpPercentMissing + } + + if cfg.ThinpPercent+cfg.ThinpMetaPercent > 100 { + return errThinpPercentTooBig + } + return nil +} + +func checkDevAvailable(dev string) error { + lvmScan, err := exec.LookPath("lvmdiskscan") + if err != nil { + logrus.Debug("could not find lvmdiskscan") + return nil + } + + out, err := exec.Command(lvmScan).CombinedOutput() + if err != nil { + logrus.WithError(err).Error(string(out)) + return nil + } + + if !bytes.Contains(out, []byte(dev)) { + return errors.Errorf("%s is not available for use with devicemapper", dev) + } + return nil +} + +func checkDevInVG(dev string) error { + pvDisplay, err := exec.LookPath("pvdisplay") + if err != nil { + logrus.Debug("could not find pvdisplay") + return nil + } + + out, err := exec.Command(pvDisplay, dev).CombinedOutput() + if err != nil { + logrus.WithError(err).Error(string(out)) + return nil + } + + scanner := bufio.NewScanner(bytes.NewReader(bytes.TrimSpace(out))) + for scanner.Scan() { + fields := strings.SplitAfter(strings.TrimSpace(scanner.Text()), "VG Name") + if len(fields) > 1 { + // got "VG Name" line" + vg := strings.TrimSpace(fields[1]) + if len(vg) > 0 { + return errors.Errorf("%s is already part of a volume group %q: must remove this device from any volume group or provide a different device", dev, vg) + } + logrus.Error(fields) + break + } + } + return nil +} + +func checkDevHasFS(dev string) error { + blkid, err := exec.LookPath("blkid") + if err != nil { + logrus.Debug("could not find blkid") + return nil + } + + out, err := exec.Command(blkid, dev).CombinedOutput() + if err != nil { + logrus.WithError(err).Error(string(out)) + return nil + } + + fields := bytes.Fields(out) + for _, f := range fields { + kv := bytes.Split(f, []byte{'='}) + if bytes.Equal(kv[0], []byte("TYPE")) { + v := bytes.Trim(kv[1], "\"") + if len(v) > 0 { 
+ return errors.Errorf("%s has a filesystem already, use dm.directlvm_device_force=true if you want to wipe the device", dev) + } + return nil + } + } + return nil +} + +func verifyBlockDevice(dev string, force bool) error { + realPath, err := filepath.Abs(dev) + if err != nil { + return errors.Errorf("unable to get absolute path for %s: %s", dev, err) + } + if realPath, err = filepath.EvalSymlinks(realPath); err != nil { + return errors.Errorf("failed to canonicalise path for %s: %s", dev, err) + } + if err := checkDevAvailable(realPath); err != nil { + return err + } + if err := checkDevInVG(realPath); err != nil { + return err + } + + if force { + return nil + } + + if err := checkDevHasFS(realPath); err != nil { + return err + } + return nil +} + +func readLVMConfig(root string) (directLVMConfig, error) { + var cfg directLVMConfig + + p := filepath.Join(root, "setup-config.json") + b, err := ioutil.ReadFile(p) + if err != nil { + if os.IsNotExist(err) { + return cfg, nil + } + return cfg, errors.Wrap(err, "error reading existing setup config") + } + + // check if this is just an empty file, no need to produce a json error later if so + if len(b) == 0 { + return cfg, nil + } + + err = json.Unmarshal(b, &cfg) + return cfg, errors.Wrap(err, "error unmarshaling previous device setup config") +} + +func writeLVMConfig(root string, cfg directLVMConfig) error { + p := filepath.Join(root, "setup-config.json") + b, err := json.Marshal(cfg) + if err != nil { + return errors.Wrap(err, "error marshalling direct lvm config") + } + err = ioutil.WriteFile(p, b, 0600) + return errors.Wrap(err, "error writing direct lvm config to file") +} + +func setupDirectLVM(cfg directLVMConfig) error { + lvmProfileDir := "/etc/lvm/profile" + binaries := []string{"pvcreate", "vgcreate", "lvcreate", "lvconvert", "lvchange", "thin_check"} + + for _, bin := range binaries { + if _, err := exec.LookPath(bin); err != nil { + return errors.Wrap(err, "error looking up command `"+bin+"` while setting up direct lvm") + } + } + + err := os.MkdirAll(lvmProfileDir, 0755) + if err != nil { + return errors.Wrap(err, "error creating lvm profile directory") + } + + if cfg.AutoExtendPercent == 0 { + cfg.AutoExtendPercent = 20 + } + + if cfg.AutoExtendThreshold == 0 { + cfg.AutoExtendThreshold = 80 + } + + if cfg.ThinpPercent == 0 { + cfg.ThinpPercent = 95 + } + if cfg.ThinpMetaPercent == 0 { + cfg.ThinpMetaPercent = 1 + } + + out, err := exec.Command("pvcreate", "-f", cfg.Device).CombinedOutput() + if err != nil { + return errors.Wrap(err, string(out)) + } + + out, err = exec.Command("vgcreate", "storage", cfg.Device).CombinedOutput() + if err != nil { + return errors.Wrap(err, string(out)) + } + + out, err = exec.Command("lvcreate", "--wipesignatures", "y", "-n", "thinpool", "storage", "--extents", fmt.Sprintf("%d%%VG", cfg.ThinpPercent)).CombinedOutput() + if err != nil { + return errors.Wrap(err, string(out)) + } + out, err = exec.Command("lvcreate", "--wipesignatures", "y", "-n", "thinpoolmeta", "storage", "--extents", fmt.Sprintf("%d%%VG", cfg.ThinpMetaPercent)).CombinedOutput() + if err != nil { + return errors.Wrap(err, string(out)) + } + + out, err = exec.Command("lvconvert", "-y", "--zero", "n", "-c", "512K", "--thinpool", "storage/thinpool", "--poolmetadata", "storage/thinpoolmeta").CombinedOutput() + if err != nil { + return errors.Wrap(err, string(out)) + } + + profile := fmt.Sprintf("activation{\nthin_pool_autoextend_threshold=%d\nthin_pool_autoextend_percent=%d\n}", cfg.AutoExtendThreshold, cfg.AutoExtendPercent) + 
err = ioutil.WriteFile(lvmProfileDir+"/storage-thinpool.profile", []byte(profile), 0600) + if err != nil { + return errors.Wrap(err, "error writing storage thinp autoextend profile") + } + + out, err = exec.Command("lvchange", "--metadataprofile", "storage-thinpool", "storage/thinpool").CombinedOutput() + return errors.Wrap(err, string(out)) +} diff --git a/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go b/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go new file mode 100644 index 0000000..b6f22e9 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go @@ -0,0 +1,2846 @@ +// +build linux + +package devmapper + +import ( + "bufio" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path" + "path/filepath" + "reflect" + "strconv" + "strings" + "sync" + "time" + + "github.com/containers/storage/drivers" + "github.com/containers/storage/pkg/devicemapper" + "github.com/containers/storage/pkg/dmesg" + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/loopback" + "github.com/containers/storage/pkg/mount" + "github.com/containers/storage/pkg/parsers" + "github.com/containers/storage/pkg/parsers/kernel" + units "github.com/docker/go-units" + "github.com/opencontainers/selinux/go-selinux/label" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +var ( + defaultDataLoopbackSize int64 = 100 * 1024 * 1024 * 1024 + defaultMetaDataLoopbackSize int64 = 2 * 1024 * 1024 * 1024 + defaultBaseFsSize uint64 = 10 * 1024 * 1024 * 1024 + defaultThinpBlockSize uint32 = 128 // 64K = 128 512b sectors + defaultUdevSyncOverride = false + maxDeviceID = 0xffffff // 24 bit, pool limit + deviceIDMapSz = (maxDeviceID + 1) / 8 + driverDeferredRemovalSupport = false + enableDeferredRemoval = false + enableDeferredDeletion = false + userBaseSize = false + defaultMinFreeSpacePercent uint32 = 10 + lvmSetupConfigForce bool +) + +const deviceSetMetaFile string = "deviceset-metadata" +const transactionMetaFile string = "transaction-metadata" + +type transaction struct { + OpenTransactionID uint64 `json:"open_transaction_id"` + DeviceIDHash string `json:"device_hash"` + DeviceID int `json:"device_id"` +} + +type devInfo struct { + Hash string `json:"-"` + DeviceID int `json:"device_id"` + Size uint64 `json:"size"` + TransactionID uint64 `json:"transaction_id"` + Initialized bool `json:"initialized"` + Deleted bool `json:"deleted"` + devices *DeviceSet + + // The global DeviceSet lock guarantees that we serialize all + // the calls to libdevmapper (which is not threadsafe), but we + // sometimes release that lock while sleeping. In that case + // this per-device lock is still held, protecting against + // other accesses to the device that we're doing the wait on. + // + // WARNING: In order to avoid AB-BA deadlocks when releasing + // the global lock while holding the per-device locks all + // device locks must be acquired *before* the device lock, and + // multiple device locks should be acquired parent before child. 
+ lock sync.Mutex +} + +type metaData struct { + Devices map[string]*devInfo `json:"Devices"` +} + +// DeviceSet holds information about list of devices +type DeviceSet struct { + metaData `json:"-"` + sync.Mutex `json:"-"` // Protects all fields of DeviceSet and serializes calls into libdevmapper + root string + devicePrefix string + TransactionID uint64 `json:"-"` + NextDeviceID int `json:"next_device_id"` + deviceIDMap []byte + + // Options + dataLoopbackSize int64 + metaDataLoopbackSize int64 + baseFsSize uint64 + filesystem string + mountOptions string + mkfsArgs []string + dataDevice string // block or loop dev + dataLoopFile string // loopback file, if used + metadataDevice string // block or loop dev + metadataLoopFile string // loopback file, if used + doBlkDiscard bool + thinpBlockSize uint32 + thinPoolDevice string + transaction `json:"-"` + overrideUdevSyncCheck bool + deferredRemove bool // use deferred removal + deferredDelete bool // use deferred deletion + BaseDeviceUUID string // save UUID of base device + BaseDeviceFilesystem string // save filesystem of base device + nrDeletedDevices uint // number of deleted devices + deletionWorkerTicker *time.Ticker + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap + minFreeSpacePercent uint32 //min free space percentage in thinpool + xfsNospaceRetries string // max retries when xfs receives ENOSPC + lvmSetupConfig directLVMConfig +} + +// DiskUsage contains information about disk usage and is used when reporting Status of a device. +type DiskUsage struct { + // Used bytes on the disk. + Used uint64 + // Total bytes on the disk. + Total uint64 + // Available bytes on the disk. + Available uint64 +} + +// Status returns the information about the device. +type Status struct { + // PoolName is the name of the data pool. + PoolName string + // DataFile is the actual block device for data. + DataFile string + // DataLoopback loopback file, if used. + DataLoopback string + // MetadataFile is the actual block device for metadata. + MetadataFile string + // MetadataLoopback is the loopback file, if used. + MetadataLoopback string + // Data is the disk used for data. + Data DiskUsage + // Metadata is the disk used for meta data. + Metadata DiskUsage + // BaseDeviceSize is base size of container and image + BaseDeviceSize uint64 + // BaseDeviceFS is backing filesystem. + BaseDeviceFS string + // SectorSize size of the vector. + SectorSize uint64 + // UdevSyncSupported is true if sync is supported. + UdevSyncSupported bool + // DeferredRemoveEnabled is true then the device is not unmounted. + DeferredRemoveEnabled bool + // True if deferred deletion is enabled. This is different from + // deferred removal. "removal" means that device mapper device is + // deactivated. Thin device is still in thin pool and can be activated + // again. But "deletion" means that thin device will be deleted from + // thin pool and it can't be activated again. + DeferredDeleteEnabled bool + DeferredDeletedDeviceCount uint + MinFreeSpace uint64 +} + +// Structure used to export image/container metadata in inspect. +type deviceMetadata struct { + deviceID int + deviceSize uint64 // size in bytes + deviceName string // Device name as used during activation +} + +// DevStatus returns information about device mounted containing its id, size and sector information. +type DevStatus struct { + // DeviceID is the id of the device. + DeviceID int + // Size is the size of the filesystem. 
+	Size uint64
+	// TransactionID is a unique integer per device set used to identify an operation on the file system, this number is incremental.
+	TransactionID uint64
+	// SizeInSectors indicates the size of the sectors allocated.
+	SizeInSectors uint64
+	// MappedSectors indicates number of mapped sectors.
+	MappedSectors uint64
+	// HighestMappedSector is the pointer to the highest mapped sector.
+	HighestMappedSector uint64
+}
+
+func getDevName(name string) string {
+	return "/dev/mapper/" + name
+}
+
+func (info *devInfo) Name() string {
+	hash := info.Hash
+	if hash == "" {
+		hash = "base"
+	}
+	return fmt.Sprintf("%s-%s", info.devices.devicePrefix, hash)
+}
+
+func (info *devInfo) DevName() string {
+	return getDevName(info.Name())
+}
+
+func (devices *DeviceSet) loopbackDir() string {
+	return path.Join(devices.root, "devicemapper")
+}
+
+func (devices *DeviceSet) metadataDir() string {
+	return path.Join(devices.root, "metadata")
+}
+
+func (devices *DeviceSet) metadataFile(info *devInfo) string {
+	file := info.Hash
+	if file == "" {
+		file = "base"
+	}
+	return path.Join(devices.metadataDir(), file)
+}
+
+func (devices *DeviceSet) transactionMetaFile() string {
+	return path.Join(devices.metadataDir(), transactionMetaFile)
+}
+
+func (devices *DeviceSet) deviceSetMetaFile() string {
+	return path.Join(devices.metadataDir(), deviceSetMetaFile)
+}
+
+func (devices *DeviceSet) oldMetadataFile() string {
+	return path.Join(devices.loopbackDir(), "json")
+}
+
+func (devices *DeviceSet) getPoolName() string {
+	if devices.thinPoolDevice == "" {
+		return devices.devicePrefix + "-pool"
+	}
+	return devices.thinPoolDevice
+}
+
+func (devices *DeviceSet) getPoolDevName() string {
+	return getDevName(devices.getPoolName())
+}
+
+func (devices *DeviceSet) hasImage(name string) bool {
+	dirname := devices.loopbackDir()
+	filename := path.Join(dirname, name)
+
+	_, err := os.Stat(filename)
+	return err == nil
+}
+
+// ensureImage creates a sparse file of <size> bytes at the path
+// <root>/devicemapper/<name>.
+// If the file already exists and new size is larger than its current size, it grows to the new size.
+// Either way it returns the full path.
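ensureImage below leans on the classic sparse-file trick: Truncate on a fresh file only updates its apparent size, so a 100GB loopback "device" initially occupies almost no disk blocks, and the same call later grows (but never shrinks) it. A freestanding sketch of that behavior (the path and size are arbitrary):

package main

import (
	"log"
	"os"
)

func main() {
	f, err := os.OpenFile("/tmp/sparse-demo.img", os.O_RDWR|os.O_CREATE, 0600)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Extends the file to 100GiB without writing a single data block;
	// the filesystem materializes blocks lazily on first write.
	if err := f.Truncate(100 * 1024 * 1024 * 1024); err != nil {
		log.Fatal(err)
	}

	if fi, err := f.Stat(); err == nil {
		log.Printf("apparent size: %d bytes", fi.Size())
	}
}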
+func (devices *DeviceSet) ensureImage(name string, size int64) (string, error) { + dirname := devices.loopbackDir() + filename := path.Join(dirname, name) + + uid, gid, err := idtools.GetRootUIDGID(devices.uidMaps, devices.gidMaps) + if err != nil { + return "", err + } + if err := idtools.MkdirAllAs(dirname, 0700, uid, gid); err != nil && !os.IsExist(err) { + return "", err + } + + if fi, err := os.Stat(filename); err != nil { + if !os.IsNotExist(err) { + return "", err + } + logrus.Debugf("devmapper: Creating loopback file %s for device-manage use", filename) + file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0600) + if err != nil { + return "", err + } + defer file.Close() + + if err := file.Truncate(size); err != nil { + return "", err + } + } else { + if fi.Size() < size { + file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0600) + if err != nil { + return "", err + } + defer file.Close() + if err := file.Truncate(size); err != nil { + return "", fmt.Errorf("devmapper: Unable to grow loopback file %s: %v", filename, err) + } + } else if fi.Size() > size { + logrus.Warnf("devmapper: Can't shrink loopback file %s", filename) + } + } + return filename, nil +} + +func (devices *DeviceSet) allocateTransactionID() uint64 { + devices.OpenTransactionID = devices.TransactionID + 1 + return devices.OpenTransactionID +} + +func (devices *DeviceSet) updatePoolTransactionID() error { + if err := devicemapper.SetTransactionID(devices.getPoolDevName(), devices.TransactionID, devices.OpenTransactionID); err != nil { + return fmt.Errorf("devmapper: Error setting devmapper transaction ID: %s", err) + } + devices.TransactionID = devices.OpenTransactionID + return nil +} + +func (devices *DeviceSet) removeMetadata(info *devInfo) error { + if err := os.RemoveAll(devices.metadataFile(info)); err != nil { + return fmt.Errorf("devmapper: Error removing metadata file %s: %s", devices.metadataFile(info), err) + } + return nil +} + +// Given json data and file path, write it to disk +func (devices *DeviceSet) writeMetaFile(jsonData []byte, filePath string) error { + tmpFile, err := ioutil.TempFile(devices.metadataDir(), ".tmp") + if err != nil { + return fmt.Errorf("devmapper: Error creating metadata file: %s", err) + } + + n, err := tmpFile.Write(jsonData) + if err != nil { + return fmt.Errorf("devmapper: Error writing metadata to %s: %s", tmpFile.Name(), err) + } + if n < len(jsonData) { + return io.ErrShortWrite + } + if err := tmpFile.Sync(); err != nil { + return fmt.Errorf("devmapper: Error syncing metadata file %s: %s", tmpFile.Name(), err) + } + if err := tmpFile.Close(); err != nil { + return fmt.Errorf("devmapper: Error closing metadata file %s: %s", tmpFile.Name(), err) + } + if err := os.Rename(tmpFile.Name(), filePath); err != nil { + return fmt.Errorf("devmapper: Error committing metadata file %s: %s", tmpFile.Name(), err) + } + + return nil +} + +func (devices *DeviceSet) saveMetadata(info *devInfo) error { + jsonData, err := json.Marshal(info) + if err != nil { + return fmt.Errorf("devmapper: Error encoding metadata to json: %s", err) + } + if err := devices.writeMetaFile(jsonData, devices.metadataFile(info)); err != nil { + return err + } + return nil +} + +func (devices *DeviceSet) markDeviceIDUsed(deviceID int) { + var mask byte + i := deviceID % 8 + mask = 1 << uint(i) + devices.deviceIDMap[deviceID/8] = devices.deviceIDMap[deviceID/8] | mask +} + +func (devices *DeviceSet) markDeviceIDFree(deviceID int) { + var mask byte + i := deviceID % 8 + mask = ^(1 << uint(i)) + 
devices.deviceIDMap[deviceID/8] = devices.deviceIDMap[deviceID/8] & mask +} + +func (devices *DeviceSet) isDeviceIDFree(deviceID int) bool { + var mask byte + i := deviceID % 8 + mask = (1 << uint(i)) + return (devices.deviceIDMap[deviceID/8] & mask) == 0 +} + +// Should be called with devices.Lock() held. +func (devices *DeviceSet) lookupDevice(hash string) (*devInfo, error) { + info := devices.Devices[hash] + if info == nil { + info = devices.loadMetadata(hash) + if info == nil { + return nil, fmt.Errorf("devmapper: Unknown device %s", hash) + } + + devices.Devices[hash] = info + } + return info, nil +} + +func (devices *DeviceSet) lookupDeviceWithLock(hash string) (*devInfo, error) { + devices.Lock() + defer devices.Unlock() + info, err := devices.lookupDevice(hash) + return info, err +} + +// This function relies on that device hash map has been loaded in advance. +// Should be called with devices.Lock() held. +func (devices *DeviceSet) constructDeviceIDMap() { + logrus.Debug("devmapper: constructDeviceIDMap()") + defer logrus.Debug("devmapper: constructDeviceIDMap() END") + + for _, info := range devices.Devices { + devices.markDeviceIDUsed(info.DeviceID) + logrus.Debugf("devmapper: Added deviceId=%d to DeviceIdMap", info.DeviceID) + } +} + +func (devices *DeviceSet) deviceFileWalkFunction(path string, finfo os.FileInfo) error { + + // Skip some of the meta files which are not device files. + if strings.HasSuffix(finfo.Name(), ".migrated") { + logrus.Debugf("devmapper: Skipping file %s", path) + return nil + } + + if strings.HasPrefix(finfo.Name(), ".") { + logrus.Debugf("devmapper: Skipping file %s", path) + return nil + } + + if finfo.Name() == deviceSetMetaFile { + logrus.Debugf("devmapper: Skipping file %s", path) + return nil + } + + if finfo.Name() == transactionMetaFile { + logrus.Debugf("devmapper: Skipping file %s", path) + return nil + } + + logrus.Debugf("devmapper: Loading data for file %s", path) + + hash := finfo.Name() + if hash == "base" { + hash = "" + } + + // Include deleted devices also as cleanup delete device logic + // will go through it and see if there are any deleted devices. + if _, err := devices.lookupDevice(hash); err != nil { + return fmt.Errorf("devmapper: Error looking up device %s:%v", hash, err) + } + + return nil +} + +func (devices *DeviceSet) loadDeviceFilesOnStart() error { + logrus.Debug("devmapper: loadDeviceFilesOnStart()") + defer logrus.Debug("devmapper: loadDeviceFilesOnStart() END") + + var scan = func(path string, info os.FileInfo, err error) error { + if err != nil { + logrus.Debugf("devmapper: Can't walk the file %s", path) + return nil + } + + // Skip any directories + if info.IsDir() { + return nil + } + + return devices.deviceFileWalkFunction(path, info) + } + + return filepath.Walk(devices.metadataDir(), scan) +} + +// Should be called with devices.Lock() held. +func (devices *DeviceSet) unregisterDevice(hash string) error { + logrus.Debugf("devmapper: unregisterDevice(%v)", hash) + info := &devInfo{ + Hash: hash, + } + + delete(devices.Devices, hash) + + if err := devices.removeMetadata(info); err != nil { + logrus.Debugf("devmapper: Error removing metadata: %s", err) + return err + } + + return nil +} + +// Should be called with devices.Lock() held. 
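The deviceIDMap helpers just above treat the byte slice as a bitmap over the 24-bit thin-pool ID space: device ID n lives in byte n/8 at bit n%8, so the whole map fits in (maxDeviceID+1)/8 bytes, i.e. 2MiB. A self-contained sketch of the same bookkeeping:

package main

import "fmt"

const maxDeviceID = 0xffffff // 24-bit thin-pool limit, as in deviceset.go

type idMap []byte

func newIDMap() idMap { return make(idMap, (maxDeviceID+1)/8) }

func (m idMap) markUsed(id int)    { m[id/8] |= 1 << uint(id%8) }
func (m idMap) markFree(id int)    { m[id/8] &^= 1 << uint(id%8) }
func (m idMap) isFree(id int) bool { return m[id/8]&(1<<uint(id%8)) == 0 }

func main() {
	m := newIDMap()
	m.markUsed(9) // sets bit 1 of byte 1
	fmt.Println(m.isFree(9), m.isFree(10)) // false true
	m.markFree(9)
	fmt.Println(m.isFree(9)) // true
}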
+func (devices *DeviceSet) registerDevice(id int, hash string, size uint64, transactionID uint64) (*devInfo, error) { + logrus.Debugf("devmapper: registerDevice(%v, %v)", id, hash) + info := &devInfo{ + Hash: hash, + DeviceID: id, + Size: size, + TransactionID: transactionID, + Initialized: false, + devices: devices, + } + + devices.Devices[hash] = info + + if err := devices.saveMetadata(info); err != nil { + // Try to remove unused device + delete(devices.Devices, hash) + return nil, err + } + + return info, nil +} + +func (devices *DeviceSet) activateDeviceIfNeeded(info *devInfo, ignoreDeleted bool) error { + logrus.Debugf("devmapper: activateDeviceIfNeeded(%v)", info.Hash) + + if info.Deleted && !ignoreDeleted { + return fmt.Errorf("devmapper: Can't activate device %v as it is marked for deletion", info.Hash) + } + + // Make sure deferred removal on device is canceled, if one was + // scheduled. + if err := devices.cancelDeferredRemovalIfNeeded(info); err != nil { + return fmt.Errorf("devmapper: Device Deferred Removal Cancellation Failed: %s", err) + } + + if devinfo, _ := devicemapper.GetInfo(info.Name()); devinfo != nil && devinfo.Exists != 0 { + return nil + } + + return devicemapper.ActivateDevice(devices.getPoolDevName(), info.Name(), info.DeviceID, info.Size) +} + +// xfsSupported checks if xfs is supported, returns nil if it is, otherwise an error +func xfsSupported() error { + // Make sure mkfs.xfs is available + if _, err := exec.LookPath("mkfs.xfs"); err != nil { + return err // error text is descriptive enough + } + + // Check if kernel supports xfs filesystem or not. + exec.Command("modprobe", "xfs").Run() + + f, err := os.Open("/proc/filesystems") + if err != nil { + return errors.Wrapf(err, "error checking for xfs support") + } + defer f.Close() + + s := bufio.NewScanner(f) + for s.Scan() { + if strings.HasSuffix(s.Text(), "\txfs") { + return nil + } + } + + if err := s.Err(); err != nil { + return errors.Wrapf(err, "error checking for xfs support") + } + + return errors.New(`kernel does not support xfs, or "modprobe xfs" failed`) +} + +func determineDefaultFS() string { + err := xfsSupported() + if err == nil { + return "xfs" + } + + logrus.Warnf("devmapper: XFS is not supported in your system (%v). Defaulting to ext4 filesystem", err) + return "ext4" +} + +// mkfsOptions tries to figure out whether some additional mkfs options are required +func mkfsOptions(fs string) []string { + if fs == "xfs" && !kernel.CheckKernelVersion(3, 16, 0) { + // For kernels earlier than 3.16 (and newer xfsutils), + // some xfs features need to be explicitly disabled. + return []string{"-m", "crc=0,finobt=0"} + } + + return []string{} +} + +func (devices *DeviceSet) createFilesystem(info *devInfo) (err error) { + devname := info.DevName() + + if devices.filesystem == "" { + devices.filesystem = determineDefaultFS() + } + if err := devices.saveBaseDeviceFilesystem(devices.filesystem); err != nil { + return err + } + + args := mkfsOptions(devices.filesystem) + args = append(args, devices.mkfsArgs...) 
+ args = append(args, devname) + + logrus.Infof("devmapper: Creating filesystem %s on device %s, mkfs args: %v", devices.filesystem, info.Name(), args) + defer func() { + if err != nil { + logrus.Infof("devmapper: Error while creating filesystem %s on device %s: %v", devices.filesystem, info.Name(), err) + } else { + logrus.Infof("devmapper: Successfully created filesystem %s on device %s", devices.filesystem, info.Name()) + } + }() + + switch devices.filesystem { + case "xfs": + err = exec.Command("mkfs.xfs", args...).Run() + case "ext4": + err = exec.Command("mkfs.ext4", append([]string{"-E", "nodiscard,lazy_itable_init=0,lazy_journal_init=0"}, args...)...).Run() + if err != nil { + err = exec.Command("mkfs.ext4", append([]string{"-E", "nodiscard,lazy_itable_init=0"}, args...)...).Run() + } + if err != nil { + return err + } + err = exec.Command("tune2fs", append([]string{"-c", "-1", "-i", "0"}, devname)...).Run() + default: + err = fmt.Errorf("devmapper: Unsupported filesystem type %s", devices.filesystem) + } + return +} + +func (devices *DeviceSet) migrateOldMetaData() error { + // Migrate old metadata file + jsonData, err := ioutil.ReadFile(devices.oldMetadataFile()) + if err != nil && !os.IsNotExist(err) { + return err + } + + if jsonData != nil { + m := metaData{Devices: make(map[string]*devInfo)} + + if err := json.Unmarshal(jsonData, &m); err != nil { + return err + } + + for hash, info := range m.Devices { + info.Hash = hash + devices.saveMetadata(info) + } + if err := os.Rename(devices.oldMetadataFile(), devices.oldMetadataFile()+".migrated"); err != nil { + return err + } + + } + + return nil +} + +// Cleanup deleted devices. It assumes that all the devices have been +// loaded in the hash table. +func (devices *DeviceSet) cleanupDeletedDevices() error { + devices.Lock() + + // If there are no deleted devices, there is nothing to do. + if devices.nrDeletedDevices == 0 { + devices.Unlock() + return nil + } + + var deletedDevices []*devInfo + + for _, info := range devices.Devices { + if !info.Deleted { + continue + } + logrus.Debugf("devmapper: Found deleted device %s.", info.Hash) + deletedDevices = append(deletedDevices, info) + } + + // Delete the deleted devices. DeleteDevice() first takes the info lock + // and then devices.Lock(). So drop it to avoid deadlock. + devices.Unlock() + + for _, info := range deletedDevices { + // This will again try deferred deletion. + if err := devices.DeleteDevice(info.Hash, false); err != nil { + logrus.Warnf("devmapper: Deletion of device %s, device_id=%v failed:%v", info.Hash, info.DeviceID, err) + } + } + + return nil +} + +func (devices *DeviceSet) countDeletedDevices() { + for _, info := range devices.Devices { + if !info.Deleted { + continue + } + devices.nrDeletedDevices++ + } +} + +func (devices *DeviceSet) startDeviceDeletionWorker() { + // Deferred deletion is not enabled. Don't do anything. + if !devices.deferredDelete { + return + } + + // Cleanup right away if there are any leaked devices. 
Note this + // could cause some slowdown for process startup, if there were + // Leaked devices + devices.cleanupDeletedDevices() + logrus.Debug("devmapper: Worker to cleanup deleted devices started") + for range devices.deletionWorkerTicker.C { + devices.cleanupDeletedDevices() + } +} + +func (devices *DeviceSet) initMetaData() error { + devices.Lock() + defer devices.Unlock() + + if err := devices.migrateOldMetaData(); err != nil { + return err + } + + _, transactionID, _, _, _, _, err := devices.poolStatus() + if err != nil { + return err + } + + devices.TransactionID = transactionID + + if err := devices.loadDeviceFilesOnStart(); err != nil { + return fmt.Errorf("devmapper: Failed to load device files:%v", err) + } + + devices.constructDeviceIDMap() + devices.countDeletedDevices() + + if err := devices.processPendingTransaction(); err != nil { + return err + } + + // Start a goroutine to cleanup Deleted Devices + go devices.startDeviceDeletionWorker() + return nil +} + +func (devices *DeviceSet) incNextDeviceID() { + // IDs are 24bit, so wrap around + devices.NextDeviceID = (devices.NextDeviceID + 1) & maxDeviceID +} + +func (devices *DeviceSet) getNextFreeDeviceID() (int, error) { + devices.incNextDeviceID() + for i := 0; i <= maxDeviceID; i++ { + if devices.isDeviceIDFree(devices.NextDeviceID) { + devices.markDeviceIDUsed(devices.NextDeviceID) + return devices.NextDeviceID, nil + } + devices.incNextDeviceID() + } + + return 0, fmt.Errorf("devmapper: Unable to find a free device ID") +} + +func (devices *DeviceSet) poolHasFreeSpace() error { + if devices.minFreeSpacePercent == 0 { + return nil + } + + _, _, dataUsed, dataTotal, metadataUsed, metadataTotal, err := devices.poolStatus() + if err != nil { + return err + } + + minFreeData := (dataTotal * uint64(devices.minFreeSpacePercent)) / 100 + if minFreeData < 1 { + minFreeData = 1 + } + dataFree := dataTotal - dataUsed + if dataFree < minFreeData { + return fmt.Errorf("devmapper: Thin Pool has %v free data blocks which is less than minimum required %v free data blocks. Create more free space in thin pool or use dm.min_free_space option to change behavior", (dataTotal - dataUsed), minFreeData) + } + + minFreeMetadata := (metadataTotal * uint64(devices.minFreeSpacePercent)) / 100 + if minFreeMetadata < 1 { + minFreeMetadata = 1 + } + + metadataFree := metadataTotal - metadataUsed + if metadataFree < minFreeMetadata { + return fmt.Errorf("devmapper: Thin Pool has %v free metadata blocks which is less than minimum required %v free metadata blocks. Create more free metadata space in thin pool or use dm.min_free_space option to change behavior", (metadataTotal - metadataUsed), minFreeMetadata) + } + + return nil +} + +func (devices *DeviceSet) createRegisterDevice(hash string) (*devInfo, error) { + devices.Lock() + defer devices.Unlock() + + deviceID, err := devices.getNextFreeDeviceID() + if err != nil { + return nil, err + } + + if err := devices.openTransaction(hash, deviceID); err != nil { + logrus.Debugf("devmapper: Error opening transaction hash = %s deviceID = %d", hash, deviceID) + devices.markDeviceIDFree(deviceID) + return nil, err + } + + for { + if err := devicemapper.CreateDevice(devices.getPoolDevName(), deviceID); err != nil { + if devicemapper.DeviceIDExists(err) { + // Device ID already exists. This should not + // happen. Now we have a mechanism to find + // a free device ID. So something is not right. + // Give a warning and continue. 
+ logrus.Errorf("devmapper: Device ID %d exists in pool but it is supposed to be unused", deviceID) + deviceID, err = devices.getNextFreeDeviceID() + if err != nil { + return nil, err + } + // Save new device id into transaction + devices.refreshTransaction(deviceID) + continue + } + logrus.Debugf("devmapper: Error creating device: %s", err) + devices.markDeviceIDFree(deviceID) + return nil, err + } + break + } + + logrus.Debugf("devmapper: Registering device (id %v) with FS size %v", deviceID, devices.baseFsSize) + info, err := devices.registerDevice(deviceID, hash, devices.baseFsSize, devices.OpenTransactionID) + if err != nil { + _ = devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) + devices.markDeviceIDFree(deviceID) + return nil, err + } + + if err := devices.closeTransaction(); err != nil { + devices.unregisterDevice(hash) + devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) + devices.markDeviceIDFree(deviceID) + return nil, err + } + return info, nil +} + +func (devices *DeviceSet) takeSnapshot(hash string, baseInfo *devInfo, size uint64) error { + var ( + devinfo *devicemapper.Info + err error + ) + + if err = devices.poolHasFreeSpace(); err != nil { + return err + } + + if devices.deferredRemove { + devinfo, err = devicemapper.GetInfoWithDeferred(baseInfo.Name()) + if err != nil { + return err + } + if devinfo != nil && devinfo.DeferredRemove != 0 { + err = devices.cancelDeferredRemoval(baseInfo) + if err != nil { + // If Error is ErrEnxio. Device is probably already gone. Continue. + if errors.Cause(err) != devicemapper.ErrEnxio { + return err + } + devinfo = nil + } else { + defer devices.deactivateDevice(baseInfo) + } + } + } else { + devinfo, err = devicemapper.GetInfo(baseInfo.Name()) + if err != nil { + return err + } + } + + doSuspend := devinfo != nil && devinfo.Exists != 0 + + if doSuspend { + if err = devicemapper.SuspendDevice(baseInfo.Name()); err != nil { + return err + } + defer devicemapper.ResumeDevice(baseInfo.Name()) + } + + if err = devices.createRegisterSnapDevice(hash, baseInfo, size); err != nil { + return err + } + + return nil +} + +func (devices *DeviceSet) createRegisterSnapDevice(hash string, baseInfo *devInfo, size uint64) error { + deviceID, err := devices.getNextFreeDeviceID() + if err != nil { + return err + } + + if err := devices.openTransaction(hash, deviceID); err != nil { + logrus.Debugf("devmapper: Error opening transaction hash = %s deviceID = %d", hash, deviceID) + devices.markDeviceIDFree(deviceID) + return err + } + + for { + if err := devicemapper.CreateSnapDeviceRaw(devices.getPoolDevName(), deviceID, baseInfo.DeviceID); err != nil { + if devicemapper.DeviceIDExists(err) { + // Device ID already exists. This should not + // happen. Now we have a mechanism to find + // a free device ID. So something is not right. + // Give a warning and continue. 
+ logrus.Errorf("devmapper: Device ID %d exists in pool but it is supposed to be unused", deviceID) + deviceID, err = devices.getNextFreeDeviceID() + if err != nil { + return err + } + // Save new device id into transaction + devices.refreshTransaction(deviceID) + continue + } + logrus.Debugf("devmapper: Error creating snap device: %s", err) + devices.markDeviceIDFree(deviceID) + return err + } + break + } + + if _, err := devices.registerDevice(deviceID, hash, size, devices.OpenTransactionID); err != nil { + devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) + devices.markDeviceIDFree(deviceID) + logrus.Debugf("devmapper: Error registering device: %s", err) + return err + } + + if err := devices.closeTransaction(); err != nil { + devices.unregisterDevice(hash) + devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) + devices.markDeviceIDFree(deviceID) + return err + } + return nil +} + +func (devices *DeviceSet) loadMetadata(hash string) *devInfo { + info := &devInfo{Hash: hash, devices: devices} + + jsonData, err := ioutil.ReadFile(devices.metadataFile(info)) + if err != nil { + logrus.Debugf("devmapper: Failed to read %s with err: %v", devices.metadataFile(info), err) + return nil + } + + if err := json.Unmarshal(jsonData, &info); err != nil { + logrus.Debugf("devmapper: Failed to unmarshal devInfo from %s with err: %v", devices.metadataFile(info), err) + return nil + } + + if info.DeviceID > maxDeviceID { + logrus.Errorf("devmapper: Ignoring Invalid DeviceId=%d", info.DeviceID) + return nil + } + + return info +} + +func getDeviceUUID(device string) (string, error) { + out, err := exec.Command("blkid", "-s", "UUID", "-o", "value", device).Output() + if err != nil { + return "", fmt.Errorf("devmapper: Failed to find uuid for device %s:%v", device, err) + } + + uuid := strings.TrimSuffix(string(out), "\n") + uuid = strings.TrimSpace(uuid) + logrus.Debugf("devmapper: UUID for device: %s is:%s", device, uuid) + return uuid, nil +} + +func (devices *DeviceSet) getBaseDeviceSize() uint64 { + info, _ := devices.lookupDevice("") + if info == nil { + return 0 + } + return info.Size +} + +func (devices *DeviceSet) getBaseDeviceFS() string { + return devices.BaseDeviceFilesystem +} + +func (devices *DeviceSet) verifyBaseDeviceUUIDFS(baseInfo *devInfo) error { + devices.Lock() + defer devices.Unlock() + + if err := devices.activateDeviceIfNeeded(baseInfo, false); err != nil { + return err + } + defer devices.deactivateDevice(baseInfo) + + uuid, err := getDeviceUUID(baseInfo.DevName()) + if err != nil { + return err + } + + if devices.BaseDeviceUUID != uuid { + return fmt.Errorf("devmapper: Current Base Device UUID:%s does not match with stored UUID:%s. Possibly using a different thin pool than last invocation", uuid, devices.BaseDeviceUUID) + } + + if devices.BaseDeviceFilesystem == "" { + fsType, err := ProbeFsType(baseInfo.DevName()) + if err != nil { + return err + } + if err := devices.saveBaseDeviceFilesystem(fsType); err != nil { + return err + } + } + + // If user specified a filesystem using dm.fs option and current + // file system of base image is not same, warn user that dm.fs + // will be ignored. + if devices.BaseDeviceFilesystem != devices.filesystem { + logrus.Warnf("devmapper: Base device already exists and has filesystem %s on it. 
User specified filesystem %s will be ignored.", devices.BaseDeviceFilesystem, devices.filesystem) + devices.filesystem = devices.BaseDeviceFilesystem + } + return nil +} + +func (devices *DeviceSet) saveBaseDeviceFilesystem(fs string) error { + devices.BaseDeviceFilesystem = fs + return devices.saveDeviceSetMetaData() +} + +func (devices *DeviceSet) saveBaseDeviceUUID(baseInfo *devInfo) error { + devices.Lock() + defer devices.Unlock() + + if err := devices.activateDeviceIfNeeded(baseInfo, false); err != nil { + return err + } + defer devices.deactivateDevice(baseInfo) + + uuid, err := getDeviceUUID(baseInfo.DevName()) + if err != nil { + return err + } + + devices.BaseDeviceUUID = uuid + return devices.saveDeviceSetMetaData() +} + +func (devices *DeviceSet) createBaseImage() error { + logrus.Debug("devmapper: Initializing base device-mapper thin volume") + + // Create initial device + info, err := devices.createRegisterDevice("") + if err != nil { + return err + } + + logrus.Debug("devmapper: Creating filesystem on base device-mapper thin volume") + + if err := devices.activateDeviceIfNeeded(info, false); err != nil { + return err + } + + if err := devices.createFilesystem(info); err != nil { + return err + } + + info.Initialized = true + if err := devices.saveMetadata(info); err != nil { + info.Initialized = false + return err + } + + if err := devices.saveBaseDeviceUUID(info); err != nil { + return fmt.Errorf("devmapper: Could not query and save base device UUID:%v", err) + } + + return nil +} + +// Returns if thin pool device exists or not. If device exists, also makes +// sure it is a thin pool device and not some other type of device. +func (devices *DeviceSet) thinPoolExists(thinPoolDevice string) (bool, error) { + logrus.Debugf("devmapper: Checking for existence of the pool %s", thinPoolDevice) + + info, err := devicemapper.GetInfo(thinPoolDevice) + if err != nil { + return false, fmt.Errorf("devmapper: GetInfo() on device %s failed: %v", thinPoolDevice, err) + } + + // Device does not exist. + if info.Exists == 0 { + return false, nil + } + + _, _, deviceType, _, err := devicemapper.GetStatus(thinPoolDevice) + if err != nil { + return false, fmt.Errorf("devmapper: GetStatus() on device %s failed: %v", thinPoolDevice, err) + } + + if deviceType != "thin-pool" { + return false, fmt.Errorf("devmapper: Device %s is not a thin pool", thinPoolDevice) + } + + return true, nil +} + +func (devices *DeviceSet) checkThinPool() error { + _, transactionID, dataUsed, _, _, _, err := devices.poolStatus() + if err != nil { + return err + } + if dataUsed != 0 { + return fmt.Errorf("devmapper: Unable to take ownership of thin-pool (%s) that already has used data blocks", + devices.thinPoolDevice) + } + if transactionID != 0 { + return fmt.Errorf("devmapper: Unable to take ownership of thin-pool (%s) with non-zero transaction ID", + devices.thinPoolDevice) + } + return nil +} + +// Base image is initialized properly. Either save UUID for first time (for +// upgrade case or verify UUID. +func (devices *DeviceSet) setupVerifyBaseImageUUIDFS(baseInfo *devInfo) error { + // If BaseDeviceUUID is nil (upgrade case), save it and return success. 
+ if devices.BaseDeviceUUID == "" { + if err := devices.saveBaseDeviceUUID(baseInfo); err != nil { + return fmt.Errorf("devmapper: Could not query and save base device UUID:%v", err) + } + return nil + } + + if err := devices.verifyBaseDeviceUUIDFS(baseInfo); err != nil { + return fmt.Errorf("devmapper: Base Device UUID and Filesystem verification failed: %v", err) + } + + return nil +} + +func (devices *DeviceSet) checkGrowBaseDeviceFS(info *devInfo) error { + + if !userBaseSize { + return nil + } + + if devices.baseFsSize < devices.getBaseDeviceSize() { + return fmt.Errorf("devmapper: Base device size cannot be smaller than %s", units.HumanSize(float64(devices.getBaseDeviceSize()))) + } + + if devices.baseFsSize == devices.getBaseDeviceSize() { + return nil + } + + info.lock.Lock() + defer info.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + info.Size = devices.baseFsSize + + if err := devices.saveMetadata(info); err != nil { + // Try to remove unused device + delete(devices.Devices, info.Hash) + return err + } + + return devices.growFS(info) +} + +func (devices *DeviceSet) growFS(info *devInfo) error { + if err := devices.activateDeviceIfNeeded(info, false); err != nil { + return fmt.Errorf("Error activating devmapper device: %s", err) + } + + defer devices.deactivateDevice(info) + + fsMountPoint := "/run/containers/storage/mnt" + if _, err := os.Stat(fsMountPoint); os.IsNotExist(err) { + if err := os.MkdirAll(fsMountPoint, 0700); err != nil { + return err + } + defer os.RemoveAll(fsMountPoint) + } + + options := "" + if devices.BaseDeviceFilesystem == "xfs" { + // XFS needs nouuid or it can't mount filesystems with the same fs + options = joinMountOptions(options, "nouuid") + } + options = joinMountOptions(options, devices.mountOptions) + + if err := mount.Mount(info.DevName(), fsMountPoint, devices.BaseDeviceFilesystem, options); err != nil { + return fmt.Errorf("Error mounting '%s' on '%s': %s\n%v", info.DevName(), fsMountPoint, err, string(dmesg.Dmesg(256))) + } + + defer unix.Unmount(fsMountPoint, unix.MNT_DETACH) + + switch devices.BaseDeviceFilesystem { + case "ext4": + if out, err := exec.Command("resize2fs", info.DevName()).CombinedOutput(); err != nil { + return fmt.Errorf("Failed to grow rootfs:%v:%s", err, string(out)) + } + case "xfs": + if out, err := exec.Command("xfs_growfs", info.DevName()).CombinedOutput(); err != nil { + return fmt.Errorf("Failed to grow rootfs:%v:%s", err, string(out)) + } + default: + return fmt.Errorf("Unsupported filesystem type %s", devices.BaseDeviceFilesystem) + } + return nil +} + +func (devices *DeviceSet) setupBaseImage() error { + oldInfo, _ := devices.lookupDeviceWithLock("") + + // base image already exists. If it is initialized properly, do UUID + // verification and return. Otherwise remove image and set it up + // fresh. + + if oldInfo != nil { + if oldInfo.Initialized && !oldInfo.Deleted { + if err := devices.setupVerifyBaseImageUUIDFS(oldInfo); err != nil { + return err + } + + if err := devices.checkGrowBaseDeviceFS(oldInfo); err != nil { + return err + } + + return nil + } + + logrus.Debug("devmapper: Removing uninitialized base image") + // If previous base device is in deferred delete state, + // that needs to be cleaned up first. So don't try + // deferred deletion. + if err := devices.DeleteDevice("", true); err != nil { + return err + } + } + + // If we are setting up base image for the first time, make sure + // thin pool is empty. 
+ if devices.thinPoolDevice != "" && oldInfo == nil { + if err := devices.checkThinPool(); err != nil { + return err + } + } + + // Create new base image device + if err := devices.createBaseImage(); err != nil { + return err + } + + return nil +} + +func setCloseOnExec(name string) { + fileInfos, _ := ioutil.ReadDir("/proc/self/fd") + for _, i := range fileInfos { + link, _ := os.Readlink(filepath.Join("/proc/self/fd", i.Name())) + if link == name { + fd, err := strconv.Atoi(i.Name()) + if err == nil { + unix.CloseOnExec(fd) + } + } + } +} + +func major(device uint64) uint64 { + return (device >> 8) & 0xfff +} + +func minor(device uint64) uint64 { + return (device & 0xff) | ((device >> 12) & 0xfff00) +} + +// ResizePool increases the size of the pool. +func (devices *DeviceSet) ResizePool(size int64) error { + dirname := devices.loopbackDir() + datafilename := path.Join(dirname, "data") + if len(devices.dataDevice) > 0 { + datafilename = devices.dataDevice + } + metadatafilename := path.Join(dirname, "metadata") + if len(devices.metadataDevice) > 0 { + metadatafilename = devices.metadataDevice + } + + datafile, err := os.OpenFile(datafilename, os.O_RDWR, 0) + if datafile == nil { + return err + } + defer datafile.Close() + + fi, err := datafile.Stat() + if fi == nil { + return err + } + + if fi.Size() > size { + return fmt.Errorf("devmapper: Can't shrink file") + } + + dataloopback := loopback.FindLoopDeviceFor(datafile) + if dataloopback == nil { + return fmt.Errorf("devmapper: Unable to find loopback mount for: %s", datafilename) + } + defer dataloopback.Close() + + metadatafile, err := os.OpenFile(metadatafilename, os.O_RDWR, 0) + if metadatafile == nil { + return err + } + defer metadatafile.Close() + + metadataloopback := loopback.FindLoopDeviceFor(metadatafile) + if metadataloopback == nil { + return fmt.Errorf("devmapper: Unable to find loopback mount for: %s", metadatafilename) + } + defer metadataloopback.Close() + + // Grow loopback file + if err := datafile.Truncate(size); err != nil { + return fmt.Errorf("devmapper: Unable to grow loopback file: %s", err) + } + + // Reload size for loopback device + if err := loopback.SetCapacity(dataloopback); err != nil { + return fmt.Errorf("Unable to update loopback capacity: %s", err) + } + + // Suspend the pool + if err := devicemapper.SuspendDevice(devices.getPoolName()); err != nil { + return fmt.Errorf("devmapper: Unable to suspend pool: %s", err) + } + + // Reload with the new block sizes + if err := devicemapper.ReloadPool(devices.getPoolName(), dataloopback, metadataloopback, devices.thinpBlockSize); err != nil { + return fmt.Errorf("devmapper: Unable to reload pool: %s", err) + } + + // Resume the pool + if err := devicemapper.ResumeDevice(devices.getPoolName()); err != nil { + return fmt.Errorf("devmapper: Unable to resume pool: %s", err) + } + + return nil +} + +func (devices *DeviceSet) loadTransactionMetaData() error { + jsonData, err := ioutil.ReadFile(devices.transactionMetaFile()) + if err != nil { + // There is no active transaction. This will be the case + // during upgrade. 
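+ // The transaction meta file records the open transaction ID together with
+ // the hash and device ID of the device being created, so a transaction
+ // interrupted by a crash can be rolled back (see rollbackTransaction).
+ // A missing file simply means no transaction was in flight.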
+ if os.IsNotExist(err) { + devices.OpenTransactionID = devices.TransactionID + return nil + } + return err + } + + json.Unmarshal(jsonData, &devices.transaction) + return nil +} + +func (devices *DeviceSet) saveTransactionMetaData() error { + jsonData, err := json.Marshal(&devices.transaction) + if err != nil { + return fmt.Errorf("devmapper: Error encoding metadata to json: %s", err) + } + + return devices.writeMetaFile(jsonData, devices.transactionMetaFile()) +} + +func (devices *DeviceSet) removeTransactionMetaData() error { + return os.RemoveAll(devices.transactionMetaFile()) +} + +func (devices *DeviceSet) rollbackTransaction() error { + logrus.Debugf("devmapper: Rolling back open transaction: TransactionID=%d hash=%s device_id=%d", devices.OpenTransactionID, devices.DeviceIDHash, devices.DeviceID) + + // A device id might have already been deleted before transaction + // closed. In that case this call will fail. Just leave a message + // in case of failure. + if err := devicemapper.DeleteDevice(devices.getPoolDevName(), devices.DeviceID); err != nil { + logrus.Errorf("devmapper: Unable to delete device: %s", err) + } + + dinfo := &devInfo{Hash: devices.DeviceIDHash} + if err := devices.removeMetadata(dinfo); err != nil { + logrus.Errorf("devmapper: Unable to remove metadata: %s", err) + } else { + devices.markDeviceIDFree(devices.DeviceID) + } + + if err := devices.removeTransactionMetaData(); err != nil { + logrus.Errorf("devmapper: Unable to remove transaction meta file %s: %s", devices.transactionMetaFile(), err) + } + + return nil +} + +func (devices *DeviceSet) processPendingTransaction() error { + if err := devices.loadTransactionMetaData(); err != nil { + return err + } + + // If there was open transaction but pool transaction ID is same + // as open transaction ID, nothing to roll back. + if devices.TransactionID == devices.OpenTransactionID { + return nil + } + + // If open transaction ID is less than pool transaction ID, something + // is wrong. Bail out. + if devices.OpenTransactionID < devices.TransactionID { + logrus.Errorf("devmapper: Open Transaction id %d is less than pool transaction id %d", devices.OpenTransactionID, devices.TransactionID) + return nil + } + + // Pool transaction ID is not same as open transaction. There is + // a transaction which was not completed. + if err := devices.rollbackTransaction(); err != nil { + return fmt.Errorf("devmapper: Rolling back open transaction failed: %s", err) + } + + devices.OpenTransactionID = devices.TransactionID + return nil +} + +func (devices *DeviceSet) loadDeviceSetMetaData() error { + jsonData, err := ioutil.ReadFile(devices.deviceSetMetaFile()) + if err != nil { + // For backward compatibility return success if file does + // not exist. 
+ if os.IsNotExist(err) { + return nil + } + return err + } + + return json.Unmarshal(jsonData, devices) +} + +func (devices *DeviceSet) saveDeviceSetMetaData() error { + jsonData, err := json.Marshal(devices) + if err != nil { + return fmt.Errorf("devmapper: Error encoding metadata to json: %s", err) + } + + return devices.writeMetaFile(jsonData, devices.deviceSetMetaFile()) +} + +func (devices *DeviceSet) openTransaction(hash string, DeviceID int) error { + devices.allocateTransactionID() + devices.DeviceIDHash = hash + devices.DeviceID = DeviceID + if err := devices.saveTransactionMetaData(); err != nil { + return fmt.Errorf("devmapper: Error saving transaction metadata: %s", err) + } + return nil +} + +func (devices *DeviceSet) refreshTransaction(DeviceID int) error { + devices.DeviceID = DeviceID + if err := devices.saveTransactionMetaData(); err != nil { + return fmt.Errorf("devmapper: Error saving transaction metadata: %s", err) + } + return nil +} + +func (devices *DeviceSet) closeTransaction() error { + if err := devices.updatePoolTransactionID(); err != nil { + logrus.Debug("devmapper: Failed to close Transaction") + return err + } + return nil +} + +func determineDriverCapabilities(version string) error { + // Kernel driver version >= 4.27.0 support deferred removal + + logrus.Debugf("devicemapper: kernel dm driver version is %s", version) + + versionSplit := strings.Split(version, ".") + major, err := strconv.Atoi(versionSplit[0]) + if err != nil { + return errors.Wrapf(graphdriver.ErrNotSupported, "unable to parse driver major version %q as a number", versionSplit[0]) + } + + if major > 4 { + driverDeferredRemovalSupport = true + return nil + } + + if major < 4 { + return nil + } + + minor, err := strconv.Atoi(versionSplit[1]) + if err != nil { + return errors.Wrapf(graphdriver.ErrNotSupported, "unable to parse driver minor version %q as a number", versionSplit[1]) + } + + /* + * If major is 4 and minor is 27, then there is no need to + * check for patch level as it can not be less than 0. + */ + if minor >= 27 { + driverDeferredRemovalSupport = true + return nil + } + + return nil +} + +// Determine the major and minor number of loopback device +func getDeviceMajorMinor(file *os.File) (uint64, uint64, error) { + var stat unix.Stat_t + err := unix.Stat(file.Name(), &stat) + if err != nil { + return 0, 0, err + } + + dev := stat.Rdev + majorNum := major(dev) + minorNum := minor(dev) + + logrus.Debugf("devmapper: Major:Minor for device: %s is:%v:%v", file.Name(), majorNum, minorNum) + return majorNum, minorNum, nil +} + +// Given a file which is backing file of a loop back device, find the +// loopback device name and its major/minor number. 
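+// For example (illustrative; the path depends on the configured storage root,
+// and loop devices conventionally use major number 7):
+//
+//	dev, maj, min, err := getLoopFileDeviceMajMin("/var/lib/containers/storage/devicemapper/data")
+//	// e.g. dev == "/dev/loop0", maj == 7, min == 0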
+func getLoopFileDeviceMajMin(filename string) (string, uint64, uint64, error) { + file, err := os.Open(filename) + if err != nil { + logrus.Debugf("devmapper: Failed to open file %s", filename) + return "", 0, 0, err + } + + defer file.Close() + loopbackDevice := loopback.FindLoopDeviceFor(file) + if loopbackDevice == nil { + return "", 0, 0, fmt.Errorf("devmapper: Unable to find loopback mount for: %s", filename) + } + defer loopbackDevice.Close() + + Major, Minor, err := getDeviceMajorMinor(loopbackDevice) + if err != nil { + return "", 0, 0, err + } + return loopbackDevice.Name(), Major, Minor, nil +} + +// Get the major/minor numbers of thin pool data and metadata devices +func (devices *DeviceSet) getThinPoolDataMetaMajMin() (uint64, uint64, uint64, uint64, error) { + var params, poolDataMajMin, poolMetadataMajMin string + + _, _, _, params, err := devicemapper.GetTable(devices.getPoolName()) + if err != nil { + return 0, 0, 0, 0, err + } + + if _, err = fmt.Sscanf(params, "%s %s", &poolMetadataMajMin, &poolDataMajMin); err != nil { + return 0, 0, 0, 0, err + } + + logrus.Debugf("devmapper: poolDataMajMin=%s poolMetaMajMin=%s\n", poolDataMajMin, poolMetadataMajMin) + + poolDataMajMinorSplit := strings.Split(poolDataMajMin, ":") + poolDataMajor, err := strconv.ParseUint(poolDataMajMinorSplit[0], 10, 32) + if err != nil { + return 0, 0, 0, 0, err + } + + poolDataMinor, err := strconv.ParseUint(poolDataMajMinorSplit[1], 10, 32) + if err != nil { + return 0, 0, 0, 0, err + } + + poolMetadataMajMinorSplit := strings.Split(poolMetadataMajMin, ":") + poolMetadataMajor, err := strconv.ParseUint(poolMetadataMajMinorSplit[0], 10, 32) + if err != nil { + return 0, 0, 0, 0, err + } + + poolMetadataMinor, err := strconv.ParseUint(poolMetadataMajMinorSplit[1], 10, 32) + if err != nil { + return 0, 0, 0, 0, err + } + + return poolDataMajor, poolDataMinor, poolMetadataMajor, poolMetadataMinor, nil +} + +func (devices *DeviceSet) loadThinPoolLoopBackInfo() error { + poolDataMajor, poolDataMinor, poolMetadataMajor, poolMetadataMinor, err := devices.getThinPoolDataMetaMajMin() + if err != nil { + return err + } + + dirname := devices.loopbackDir() + + // data device has not been passed in. So there should be a data file + // which is being mounted as loop device. + if devices.dataDevice == "" { + datafilename := path.Join(dirname, "data") + dataLoopDevice, dataMajor, dataMinor, err := getLoopFileDeviceMajMin(datafilename) + if err != nil { + return err + } + + // Compare the two + if poolDataMajor == dataMajor && poolDataMinor == dataMinor { + devices.dataDevice = dataLoopDevice + devices.dataLoopFile = datafilename + } + + } + + // metadata device has not been passed in. So there should be a + // metadata file which is being mounted as loop device. + if devices.metadataDevice == "" { + metadatafilename := path.Join(dirname, "metadata") + metadataLoopDevice, metadataMajor, metadataMinor, err := getLoopFileDeviceMajMin(metadatafilename) + if err != nil { + return err + } + if poolMetadataMajor == metadataMajor && poolMetadataMinor == metadataMinor { + devices.metadataDevice = metadataLoopDevice + devices.metadataLoopFile = metadatafilename + } + } + + return nil +} + +func (devices *DeviceSet) enableDeferredRemovalDeletion() error { + + // If user asked for deferred removal then check both libdm library + // and kernel driver support deferred removal otherwise error out. 
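+ // Deferred removal requires support in two places: the kernel dm driver
+ // (version >= 4.27.0, probed in determineDriverCapabilities) and the
+ // libdevmapper build this binary is linked against.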
+ if enableDeferredRemoval { + if !driverDeferredRemovalSupport { + return fmt.Errorf("devmapper: Deferred removal can not be enabled as kernel does not support it") + } + if !devicemapper.LibraryDeferredRemovalSupport { + return fmt.Errorf("devmapper: Deferred removal can not be enabled as libdm does not support it") + } + logrus.Debug("devmapper: Deferred removal support enabled.") + devices.deferredRemove = true + } + + if enableDeferredDeletion { + if !devices.deferredRemove { + return fmt.Errorf("devmapper: Deferred deletion can not be enabled as deferred removal is not enabled. Enable deferred removal using --storage-opt dm.use_deferred_removal=true parameter") + } + logrus.Debug("devmapper: Deferred deletion support enabled.") + devices.deferredDelete = true + } + return nil +} + +func (devices *DeviceSet) initDevmapper(doInit bool) (retErr error) { + if err := devices.enableDeferredRemovalDeletion(); err != nil { + return err + } + + // https://github.com/docker/docker/issues/4036 + if supported := devicemapper.UdevSetSyncSupport(true); !supported { + logrus.Error("devmapper: Udev sync is not supported. This will lead to data loss and unexpected behavior. Install a more recent version of libdevmapper or select a different storage driver. For more information, see https://docs.docker.com/engine/reference/commandline/dockerd/#storage-driver-options") + + if !devices.overrideUdevSyncCheck { + return graphdriver.ErrNotSupported + } + } + + //create the root dir of the devmapper driver ownership to match this + //daemon's remapped root uid/gid so containers can start properly + uid, gid, err := idtools.GetRootUIDGID(devices.uidMaps, devices.gidMaps) + if err != nil { + return err + } + if err := idtools.MkdirAs(devices.root, 0700, uid, gid); err != nil && !os.IsExist(err) { + return err + } + if err := os.MkdirAll(devices.metadataDir(), 0700); err != nil && !os.IsExist(err) { + return err + } + + prevSetupConfig, err := readLVMConfig(devices.root) + if err != nil { + return err + } + + if !reflect.DeepEqual(devices.lvmSetupConfig, directLVMConfig{}) { + if devices.thinPoolDevice != "" { + return errors.New("cannot setup direct-lvm when `dm.thinpooldev` is also specified") + } + + if !reflect.DeepEqual(prevSetupConfig, devices.lvmSetupConfig) { + if !reflect.DeepEqual(prevSetupConfig, directLVMConfig{}) { + return errors.New("changing direct-lvm config is not supported") + } + logrus.WithField("storage-driver", "devicemapper").WithField("direct-lvm-config", devices.lvmSetupConfig).Debugf("Setting up direct lvm mode") + if err := verifyBlockDevice(devices.lvmSetupConfig.Device, lvmSetupConfigForce); err != nil { + return err + } + if err := setupDirectLVM(devices.lvmSetupConfig); err != nil { + return err + } + if err := writeLVMConfig(devices.root, devices.lvmSetupConfig); err != nil { + return err + } + } + devices.thinPoolDevice = "storage-thinpool" + logrus.WithField("storage-driver", "devicemapper").Debugf("Setting dm.thinpooldev to %q", devices.thinPoolDevice) + } + + // Set the device prefix from the device id and inode of the storage root dir + var st unix.Stat_t + if err := unix.Stat(devices.root, &st); err != nil { + return fmt.Errorf("devmapper: Error looking up dir %s: %s", devices.root, err) + } + // "reg-" stands for "regular file". + // In the future we might use "dev-" for "device file", etc. 
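+ // A generated prefix looks like "container-253:2-395628" (illustrative
+ // major:minor and inode values).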
+ // container-maj,min[-inode] stands for: + // - Managed by container storage + // - The target of this device is at major and minor + // - If is defined, use that file inside the device as a loopback image. Otherwise use the device itself. + devices.devicePrefix = fmt.Sprintf("container-%d:%d-%d", major(st.Dev), minor(st.Dev), st.Ino) + logrus.Debugf("devmapper: Generated prefix: %s", devices.devicePrefix) + + // Check for the existence of the thin-pool device + poolExists, err := devices.thinPoolExists(devices.getPoolName()) + if err != nil { + return err + } + + // It seems libdevmapper opens this without O_CLOEXEC, and go exec will not close files + // that are not Close-on-exec, + // so we add this badhack to make sure it closes itself + setCloseOnExec("/dev/mapper/control") + + // Make sure the sparse images exist in /devicemapper/data and + // /devicemapper/metadata + + createdLoopback := false + + // If the pool doesn't exist, create it + if !poolExists && devices.thinPoolDevice == "" { + logrus.Debug("devmapper: Pool doesn't exist. Creating it.") + + var ( + dataFile *os.File + metadataFile *os.File + ) + + fsMagic, err := graphdriver.GetFSMagic(devices.loopbackDir()) + if err != nil { + return err + } + switch fsMagic { + case graphdriver.FsMagicAufs: + return errors.Errorf("devmapper: Loopback devices can not be created on AUFS filesystems") + } + + if devices.dataDevice == "" { + // Make sure the sparse images exist in /devicemapper/data + + hasData := devices.hasImage("data") + + if !doInit && !hasData { + return errors.New("loopback data file not found") + } + + if !hasData { + createdLoopback = true + } + + data, err := devices.ensureImage("data", devices.dataLoopbackSize) + if err != nil { + logrus.Debugf("devmapper: Error device ensureImage (data): %s", err) + return err + } + + dataFile, err = loopback.AttachLoopDevice(data) + if err != nil { + return err + } + devices.dataLoopFile = data + devices.dataDevice = dataFile.Name() + } else { + dataFile, err = os.OpenFile(devices.dataDevice, os.O_RDWR, 0600) + if err != nil { + return err + } + } + defer dataFile.Close() + + if devices.metadataDevice == "" { + // Make sure the sparse images exist in /devicemapper/metadata + + hasMetadata := devices.hasImage("metadata") + + if !doInit && !hasMetadata { + return errors.New("loopback metadata file not found") + } + + if !hasMetadata { + createdLoopback = true + } + + metadata, err := devices.ensureImage("metadata", devices.metaDataLoopbackSize) + if err != nil { + logrus.Debugf("devmapper: Error device ensureImage (metadata): %s", err) + return err + } + + metadataFile, err = loopback.AttachLoopDevice(metadata) + if err != nil { + return err + } + devices.metadataLoopFile = metadata + devices.metadataDevice = metadataFile.Name() + } else { + metadataFile, err = os.OpenFile(devices.metadataDevice, os.O_RDWR, 0600) + if err != nil { + return err + } + } + defer metadataFile.Close() + + if err := devicemapper.CreatePool(devices.getPoolName(), dataFile, metadataFile, devices.thinpBlockSize); err != nil { + return err + } + defer func() { + if retErr != nil { + err = devices.deactivatePool() + if err != nil { + logrus.Warnf("devmapper: Failed to deactivatePool: %v", err) + } + } + }() + } + + // Pool already exists and caller did not pass us a pool. That means + // we probably created pool earlier and could not remove it as some + // containers were still using it. Detect some of the properties of + // pool, like is it using loop devices. 
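+ // loadThinPoolLoopBackInfo (below) reads the pool's dm table, whose params
+ // list the metadata device first and the data device second as major:minor
+ // pairs, and matches those numbers against the loop devices backing the
+ // files in the loopback directory.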
+ if poolExists && devices.thinPoolDevice == "" { + if err := devices.loadThinPoolLoopBackInfo(); err != nil { + logrus.Debugf("devmapper: Failed to load thin pool loopback device information:%v", err) + return err + } + } + + // If we didn't just create the data or metadata image, we need to + // load the transaction id and migrate old metadata + if !createdLoopback { + if err := devices.initMetaData(); err != nil { + return err + } + } + + if devices.thinPoolDevice == "" { + if devices.metadataLoopFile != "" || devices.dataLoopFile != "" { + logrus.Warn("devmapper: Usage of loopback devices is strongly discouraged for production use. Please use `--storage-opt dm.thinpooldev`.") + } + } + + // Right now this loads only NextDeviceID. If there is more metadata + // down the line, we might have to move it earlier. + if err := devices.loadDeviceSetMetaData(); err != nil { + return err + } + + // Setup the base image + if doInit { + if err := devices.setupBaseImage(); err != nil { + logrus.Debugf("devmapper: Error device setupBaseImage: %s", err) + return err + } + } + + return nil +} + +// AddDevice adds a device and registers in the hash. +func (devices *DeviceSet) AddDevice(hash, baseHash string, storageOpt map[string]string) error { + logrus.Debugf("devmapper: AddDevice START(hash=%s basehash=%s)", hash, baseHash) + defer logrus.Debugf("devmapper: AddDevice END(hash=%s basehash=%s)", hash, baseHash) + + // If a deleted device exists, return error. + baseInfo, err := devices.lookupDeviceWithLock(baseHash) + if err != nil { + return err + } + + if baseInfo.Deleted { + return fmt.Errorf("devmapper: Base device %v has been marked for deferred deletion", baseInfo.Hash) + } + + baseInfo.lock.Lock() + defer baseInfo.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + // Also include deleted devices in case hash of new device is + // same as one of the deleted devices. + if info, _ := devices.lookupDevice(hash); info != nil { + return fmt.Errorf("devmapper: device %s already exists. Deleted=%v", hash, info.Deleted) + } + + size, err := devices.parseStorageOpt(storageOpt) + if err != nil { + return err + } + + if size == 0 { + size = baseInfo.Size + } + + if size < baseInfo.Size { + return fmt.Errorf("devmapper: Container size cannot be smaller than %s", units.HumanSize(float64(baseInfo.Size))) + } + + if err := devices.takeSnapshot(hash, baseInfo, size); err != nil { + return err + } + + // Grow the container rootfs. + if size > baseInfo.Size { + info, err := devices.lookupDevice(hash) + if err != nil { + return err + } + + if err := devices.growFS(info); err != nil { + return err + } + } + + return nil +} + +func (devices *DeviceSet) parseStorageOpt(storageOpt map[string]string) (uint64, error) { + + // Read size to change the block device size per container. + for key, val := range storageOpt { + key := strings.ToLower(key) + switch key { + case "size": + size, err := units.RAMInBytes(val) + if err != nil { + return 0, err + } + return uint64(size), nil + default: + return 0, fmt.Errorf("Unknown option %s", key) + } + } + + return 0, nil +} + +func (devices *DeviceSet) markForDeferredDeletion(info *devInfo) error { + // If device is already in deleted state, there is nothing to be done. + if info.Deleted { + return nil + } + + logrus.Debugf("devmapper: Marking device %s for deferred deletion.", info.Hash) + + info.Deleted = true + + // save device metadata to reflect deleted state. 
+ if err := devices.saveMetadata(info); err != nil {
+ info.Deleted = false
+ return err
+ }
+
+ devices.nrDeletedDevices++
+ return nil
+}
+
+// Should be called with devices.Lock() held.
+func (devices *DeviceSet) deleteTransaction(info *devInfo, syncDelete bool) error {
+ if err := devices.openTransaction(info.Hash, info.DeviceID); err != nil {
+ logrus.Debugf("devmapper: Error opening transaction hash = %s deviceId = %d", info.Hash, info.DeviceID)
+ return err
+ }
+
+ defer devices.closeTransaction()
+
+ err := devicemapper.DeleteDevice(devices.getPoolDevName(), info.DeviceID)
+ if err != nil {
+ // Return the error if syncDelete is true, if deferred deletion
+ // is not enabled, or if the error is something other than EBUSY.
+ if syncDelete || !devices.deferredDelete || errors.Cause(err) != devicemapper.ErrBusy {
+ logrus.Debugf("devmapper: Error deleting device: %s", err)
+ return err
+ }
+ }
+
+ if err == nil {
+ if err := devices.unregisterDevice(info.Hash); err != nil {
+ return err
+ }
+ // If the device was already in deferred-delete state, deletion
+ // was being retried; reduce the deleted device count.
+ if info.Deleted {
+ devices.nrDeletedDevices--
+ }
+ devices.markDeviceIDFree(info.DeviceID)
+ } else {
+ if err := devices.markForDeferredDeletion(info); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// Issue discard only if device open count is zero.
+func (devices *DeviceSet) issueDiscard(info *devInfo) error {
+ logrus.Debugf("devmapper: issueDiscard START(device: %s).", info.Hash)
+ defer logrus.Debugf("devmapper: issueDiscard END(device: %s).", info.Hash)
+ // This is a workaround for the kernel not discarding blocks on the
+ // thin pool when we remove a thin device, so we do it manually.
+ // Even if the device is deferred-deleted, activate it and issue
+ // discards.
+ if err := devices.activateDeviceIfNeeded(info, true); err != nil {
+ return err
+ }
+
+ devinfo, err := devicemapper.GetInfo(info.Name())
+ if err != nil {
+ return err
+ }
+
+ if devinfo.OpenCount != 0 {
+ logrus.Debugf("devmapper: Device: %s is in use. OpenCount=%d. Not issuing discards.", info.Hash, devinfo.OpenCount)
+ return nil
+ }
+
+ if err := devicemapper.BlockDeviceDiscard(info.DevName()); err != nil {
+ logrus.Debugf("devmapper: Error discarding block on device: %s (ignoring)", err)
+ }
+ return nil
+}
+
+// Should be called with devices.Lock() held.
+func (devices *DeviceSet) deleteDevice(info *devInfo, syncDelete bool) error {
+ if devices.doBlkDiscard {
+ devices.issueDiscard(info)
+ }
+
+ // Try to deactivate the device in case it is active.
+ // If deferred removal is enabled and deferred deletion is disabled,
+ // then make sure the device is removed synchronously. There have been
+ // some cases of the device being busy for a short duration, and we
+ // would rather busy-wait for device removal to take care of these cases.
+ deferredRemove := devices.deferredRemove
+ if !devices.deferredDelete {
+ deferredRemove = false
+ }
+
+ if err := devices.deactivateDeviceMode(info, deferredRemove); err != nil {
+ logrus.Debugf("devmapper: Error deactivating device: %s", err)
+ return err
+ }
+
+ if err := devices.deleteTransaction(info, syncDelete); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// DeleteDevice will return success if the device has been marked for deferred
+// removal. If one wants to override that and wants DeleteDevice() to fail if
+// the device was busy and could not be deleted, set syncDelete=true.
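+// For example (illustrative):
+//
+//	if err := devices.DeleteDevice(hash, true); err != nil {
+//		// the device was busy and could not be deleted synchronously
+//	}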
+func (devices *DeviceSet) DeleteDevice(hash string, syncDelete bool) error {
+ logrus.Debugf("devmapper: DeleteDevice START(hash=%v syncDelete=%v)", hash, syncDelete)
+ defer logrus.Debugf("devmapper: DeleteDevice END(hash=%v syncDelete=%v)", hash, syncDelete)
+ info, err := devices.lookupDeviceWithLock(hash)
+ if err != nil {
+ return err
+ }
+
+ info.lock.Lock()
+ defer info.lock.Unlock()
+
+ devices.Lock()
+ defer devices.Unlock()
+
+ return devices.deleteDevice(info, syncDelete)
+}
+
+func (devices *DeviceSet) deactivatePool() error {
+ logrus.Debug("devmapper: deactivatePool() START")
+ defer logrus.Debug("devmapper: deactivatePool() END")
+ devname := devices.getPoolDevName()
+
+ devinfo, err := devicemapper.GetInfo(devname)
+ if err != nil {
+ return err
+ }
+
+ if devinfo.Exists == 0 {
+ return nil
+ }
+ if err := devicemapper.RemoveDevice(devname); err != nil {
+ return err
+ }
+
+ if d, err := devicemapper.GetDeps(devname); err == nil {
+ logrus.Warnf("devmapper: device %s still has %d active dependents", devname, d.Count)
+ }
+
+ return nil
+}
+
+func (devices *DeviceSet) deactivateDevice(info *devInfo) error {
+ return devices.deactivateDeviceMode(info, devices.deferredRemove)
+}
+
+func (devices *DeviceSet) deactivateDeviceMode(info *devInfo, deferredRemove bool) error {
+ var err error
+ logrus.Debugf("devmapper: deactivateDevice START(%s)", info.Hash)
+ defer logrus.Debugf("devmapper: deactivateDevice END(%s)", info.Hash)
+
+ devinfo, err := devicemapper.GetInfo(info.Name())
+ if err != nil {
+ return err
+ }
+
+ if devinfo.Exists == 0 {
+ return nil
+ }
+
+ if deferredRemove {
+ err = devicemapper.RemoveDeviceDeferred(info.Name())
+ } else {
+ err = devices.removeDevice(info.Name())
+ }
+
+ // This function's semantics are such that it does not return an
+ // error if the device does not exist. So if the device went away by
+ // the time we actually tried to remove it, do not return an error.
+ if errors.Cause(err) != devicemapper.ErrEnxio {
+ return err
+ }
+ return nil
+}
+
+// Issues the underlying dm remove operation.
+func (devices *DeviceSet) removeDevice(devname string) error {
+ var err error
+
+ logrus.Debugf("devmapper: removeDevice START(%s)", devname)
+ defer logrus.Debugf("devmapper: removeDevice END(%s)", devname)
+
+ for i := 0; i < 200; i++ {
+ err = devicemapper.RemoveDevice(devname)
+ if err == nil {
+ break
+ }
+ if errors.Cause(err) != devicemapper.ErrBusy {
+ return err
+ }
+
+ // If we see EBUSY it may be a transient error,
+ // sleep a bit and retry a few times.
+ devices.Unlock()
+ time.Sleep(100 * time.Millisecond)
+ devices.Lock()
+ }
+
+ return err
+}
+
+func (devices *DeviceSet) cancelDeferredRemovalIfNeeded(info *devInfo) error {
+ if !devices.deferredRemove {
+ return nil
+ }
+
+ logrus.Debugf("devmapper: cancelDeferredRemovalIfNeeded START(%s)", info.Name())
+ defer logrus.Debugf("devmapper: cancelDeferredRemovalIfNeeded END(%s)", info.Name())
+
+ devinfo, err := devicemapper.GetInfoWithDeferred(info.Name())
+ if err != nil {
+ return err
+ }
+
+ if devinfo != nil && devinfo.DeferredRemove == 0 {
+ return nil
+ }
+
+ // Cancel deferred remove
+ if err := devices.cancelDeferredRemoval(info); err != nil {
+ // If the error is ErrEnxio, the device is probably already gone; continue.
+ if errors.Cause(err) != devicemapper.ErrEnxio {
+ return err
+ }
+ }
+ return nil
+}
+
+func (devices *DeviceSet) cancelDeferredRemoval(info *devInfo) error {
+ logrus.Debugf("devmapper: cancelDeferredRemoval START(%s)", info.Name())
+ defer logrus.Debugf("devmapper: cancelDeferredRemoval END(%s)", info.Name())
+
+ var err error
+
+ // Cancel deferred remove
+ for i := 0; i < 100; i++ {
+ err = devicemapper.CancelDeferredRemove(info.Name())
+ if err != nil {
+ if errors.Cause(err) == devicemapper.ErrBusy {
+ // If we see EBUSY it may be a transient error,
+ // sleep a bit and retry a few times.
+ devices.Unlock()
+ time.Sleep(100 * time.Millisecond)
+ devices.Lock()
+ continue
+ }
+ }
+ break
+ }
+ return err
+}
+
+// Shutdown shuts down the device by unmounting the root.
+func (devices *DeviceSet) Shutdown(home string) error {
+ logrus.Debugf("devmapper: [deviceset %s] Shutdown()", devices.devicePrefix)
+ logrus.Debugf("devmapper: Shutting down DeviceSet: %s", devices.root)
+ defer logrus.Debugf("devmapper: [deviceset %s] Shutdown() END", devices.devicePrefix)
+
+ // Stop the deletion worker. This should stop delivering new events to
+ // the ticker channel. That means no new instance of cleanupDeletedDevice()
+ // will run after this call. If one instance is already running at
+ // the time of the call, it must be holding devices.Lock() and
+ // we will block on this lock till the cleanup function exits.
+ devices.deletionWorkerTicker.Stop()
+
+ devices.Lock()
+ // Save DeviceSet metadata first. Docker kills all threads if they
+ // don't finish in a certain amount of time. It is possible that the
+ // Shutdown() routine does not finish in time as we loop trying to
+ // deactivate some devices while they are busy. In that case the
+ // Shutdown() routine will be killed and we will not get a chance to
+ // save the deviceset metadata. Hence save this early, before trying
+ // to deactivate devices.
+ devices.saveDeviceSetMetaData()
+
+ // ignore the error since it's just a best effort to not try to unmount something that's mounted
+ mounts, _ := mount.GetMounts()
+ mounted := make(map[string]bool, len(mounts))
+ for _, mnt := range mounts {
+ mounted[mnt.Mountpoint] = true
+ }
+
+ if err := filepath.Walk(path.Join(home, "mnt"), func(p string, info os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+ if !info.IsDir() {
+ return nil
+ }
+
+ if mounted[p] {
+ // We use MNT_DETACH here in case it is still busy in some running
+ // container. This means it'll go away from the global scope directly,
+ // and the device will be released when that container dies.
+ if err := unix.Unmount(p, unix.MNT_DETACH); err != nil {
+ logrus.Debugf("devmapper: Shutdown unmounting %s, error: %s", p, err)
+ }
+ }
+
+ if devInfo, err := devices.lookupDevice(path.Base(p)); err != nil {
+ logrus.Debugf("devmapper: Shutdown lookup device %s, error: %s", path.Base(p), err)
+ } else {
+ if err := devices.deactivateDevice(devInfo); err != nil {
+ logrus.Debugf("devmapper: Shutdown deactivate %s, error: %s", devInfo.Hash, err)
+ }
+ }
+
+ return nil
+ }); err != nil && !os.IsNotExist(err) {
+ devices.Unlock()
+ return err
+ }
+
+ devices.Unlock()
+
+ info, _ := devices.lookupDeviceWithLock("")
+ if info != nil {
+ info.lock.Lock()
+ devices.Lock()
+ if err := devices.deactivateDevice(info); err != nil {
+ logrus.Debugf("devmapper: Shutdown deactivate base, error: %s", err)
+ }
+ devices.Unlock()
+ info.lock.Unlock()
+ }
+
+ devices.Lock()
+ if devices.thinPoolDevice == "" {
+ if err := devices.deactivatePool(); err != nil {
+ logrus.Debugf("devmapper: Shutdown deactivate pool, error: %s", err)
+ }
+ }
+ devices.Unlock()
+
+ return nil
+}
+
+// Recent XFS changes allow changing the behavior of the filesystem in case of
+// errors. When the thin pool gets full and XFS gets an ENOSPC error, it
+// currently retries the IO infinitely, which can sometimes block the
+// container process so that it can't be killed. With a value of 0, XFS will
+// not retry upon error and will instead shut down the filesystem.
+
+func (devices *DeviceSet) xfsSetNospaceRetries(info *devInfo) error {
+ dmDevicePath, err := os.Readlink(info.DevName())
+ if err != nil {
+ return fmt.Errorf("devmapper: readlink failed for device %v: %v", info.DevName(), err)
+ }
+
+ dmDeviceName := path.Base(dmDevicePath)
+ filePath := "/sys/fs/xfs/" + dmDeviceName + "/error/metadata/ENOSPC/max_retries"
+ maxRetriesFile, err := os.OpenFile(filePath, os.O_WRONLY, 0)
+ if err != nil {
+ return fmt.Errorf("devmapper: user specified daemon option dm.xfs_nospace_max_retries but it does not seem to be supported on this system: %v", err)
+ }
+ defer maxRetriesFile.Close()
+
+ // Write the configured retry count (typically 0).
+ _, err = maxRetriesFile.WriteString(devices.xfsNospaceRetries)
+ if err != nil {
+ return fmt.Errorf("devmapper: Failed to write string %v to file %v: %v", devices.xfsNospaceRetries, filePath, err)
+ }
+ return nil
+}
+
+// MountDevice mounts the device if not already mounted.
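+// A typical call (illustrative; the path and label are made up):
+//
+//	opts := graphdriver.MountOpts{MountLabel: label}
+//	err := devices.MountDevice(id, "/var/lib/containers/storage/devicemapper/mnt/"+id, opts)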
+func (devices *DeviceSet) MountDevice(hash, path string, moptions graphdriver.MountOpts) error {
+ info, err := devices.lookupDeviceWithLock(hash)
+ if err != nil {
+ return err
+ }
+
+ if info.Deleted {
+ return fmt.Errorf("devmapper: Can't mount device %v as it has been marked for deferred deletion", info.Hash)
+ }
+
+ info.lock.Lock()
+ defer info.lock.Unlock()
+
+ devices.Lock()
+ defer devices.Unlock()
+
+ if err := devices.activateDeviceIfNeeded(info, false); err != nil {
+ return fmt.Errorf("devmapper: Error activating devmapper device for '%s': %s", hash, err)
+ }
+
+ fstype, err := ProbeFsType(info.DevName())
+ if err != nil {
+ return err
+ }
+
+ options := ""
+
+ if fstype == "xfs" {
+ // XFS needs nouuid or it can't mount filesystems with the same UUID
+ options = joinMountOptions(options, "nouuid")
+ }
+
+ mountOptions := devices.mountOptions
+ if len(moptions.Options) > 0 {
+ addNouuid := strings.Contains(mountOptions, "nouuid")
+ mountOptions = strings.Join(moptions.Options, ",")
+ if addNouuid {
+ mountOptions = fmt.Sprintf("nouuid,%s", mountOptions)
+ }
+ }
+
+ options = joinMountOptions(options, mountOptions)
+ options = joinMountOptions(options, label.FormatMountLabel("", moptions.MountLabel))
+
+ if err := mount.Mount(info.DevName(), path, fstype, options); err != nil {
+ return fmt.Errorf("devmapper: Error mounting '%s' on '%s': %s\n%v", info.DevName(), path, err, string(dmesg.Dmesg(256)))
+ }
+
+ if fstype == "xfs" && devices.xfsNospaceRetries != "" {
+ if err := devices.xfsSetNospaceRetries(info); err != nil {
+ unix.Unmount(path, unix.MNT_DETACH)
+ devices.deactivateDevice(info)
+ return err
+ }
+ }
+
+ return nil
+}
+
+// UnmountDevice unmounts the device and removes it from the hash.
+func (devices *DeviceSet) UnmountDevice(hash, mountPath string) error {
+ logrus.Debugf("devmapper: UnmountDevice START(hash=%s)", hash)
+ defer logrus.Debugf("devmapper: UnmountDevice END(hash=%s)", hash)
+
+ info, err := devices.lookupDeviceWithLock(hash)
+ if err != nil {
+ return err
+ }
+
+ info.lock.Lock()
+ defer info.lock.Unlock()
+
+ devices.Lock()
+ defer devices.Unlock()
+
+ logrus.Debugf("devmapper: Unmount(%s)", mountPath)
+ if err := unix.Unmount(mountPath, unix.MNT_DETACH); err != nil {
+ return err
+ }
+ logrus.Debug("devmapper: Unmount done")
+
+ return devices.deactivateDevice(info)
+}
+
+// HasDevice returns true if the device metadata exists.
+func (devices *DeviceSet) HasDevice(hash string) bool {
+ info, _ := devices.lookupDeviceWithLock(hash)
+ return info != nil
+}
+
+// List returns a list of device ids.
+func (devices *DeviceSet) List() []string { + devices.Lock() + defer devices.Unlock() + + ids := make([]string, len(devices.Devices)) + i := 0 + for k := range devices.Devices { + ids[i] = k + i++ + } + return ids +} + +func (devices *DeviceSet) deviceStatus(devName string) (sizeInSectors, mappedSectors, highestMappedSector uint64, err error) { + var params string + _, sizeInSectors, _, params, err = devicemapper.GetStatus(devName) + if err != nil { + return + } + if _, err = fmt.Sscanf(params, "%d %d", &mappedSectors, &highestMappedSector); err == nil { + return + } + return +} + +// GetDeviceStatus provides size, mapped sectors +func (devices *DeviceSet) GetDeviceStatus(hash string) (*DevStatus, error) { + info, err := devices.lookupDeviceWithLock(hash) + if err != nil { + return nil, err + } + + info.lock.Lock() + defer info.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + status := &DevStatus{ + DeviceID: info.DeviceID, + Size: info.Size, + TransactionID: info.TransactionID, + } + + if err := devices.activateDeviceIfNeeded(info, false); err != nil { + return nil, fmt.Errorf("devmapper: Error activating devmapper device for '%s': %s", hash, err) + } + + sizeInSectors, mappedSectors, highestMappedSector, err := devices.deviceStatus(info.DevName()) + + if err != nil { + return nil, err + } + + status.SizeInSectors = sizeInSectors + status.MappedSectors = mappedSectors + status.HighestMappedSector = highestMappedSector + + return status, nil +} + +func (devices *DeviceSet) poolStatus() (totalSizeInSectors, transactionID, dataUsed, dataTotal, metadataUsed, metadataTotal uint64, err error) { + var params string + if _, totalSizeInSectors, _, params, err = devicemapper.GetStatus(devices.getPoolName()); err == nil { + _, err = fmt.Sscanf(params, "%d %d/%d %d/%d", &transactionID, &metadataUsed, &metadataTotal, &dataUsed, &dataTotal) + } + return +} + +// DataDevicePath returns the path to the data storage for this deviceset, +// regardless of loopback or block device +func (devices *DeviceSet) DataDevicePath() string { + return devices.dataDevice +} + +// MetadataDevicePath returns the path to the metadata storage for this deviceset, +// regardless of loopback or block device +func (devices *DeviceSet) MetadataDevicePath() string { + return devices.metadataDevice +} + +func (devices *DeviceSet) getUnderlyingAvailableSpace(loopFile string) (uint64, error) { + buf := new(unix.Statfs_t) + if err := unix.Statfs(loopFile, buf); err != nil { + logrus.Warnf("devmapper: Couldn't stat loopfile filesystem %v: %v", loopFile, err) + return 0, err + } + return buf.Bfree * uint64(buf.Bsize), nil +} + +func (devices *DeviceSet) isRealFile(loopFile string) (bool, error) { + if loopFile != "" { + fi, err := os.Stat(loopFile) + if err != nil { + logrus.Warnf("devmapper: Couldn't stat loopfile %v: %v", loopFile, err) + return false, err + } + return fi.Mode().IsRegular(), nil + } + return false, nil +} + +// Status returns the current status of this deviceset +func (devices *DeviceSet) Status() *Status { + devices.Lock() + defer devices.Unlock() + + status := &Status{} + + status.PoolName = devices.getPoolName() + status.DataFile = devices.DataDevicePath() + status.DataLoopback = devices.dataLoopFile + status.MetadataFile = devices.MetadataDevicePath() + status.MetadataLoopback = devices.metadataLoopFile + status.UdevSyncSupported = devicemapper.UdevSyncSupported() + status.DeferredRemoveEnabled = devices.deferredRemove + status.DeferredDeleteEnabled = devices.deferredDelete + 
status.DeferredDeletedDeviceCount = devices.nrDeletedDevices + status.BaseDeviceSize = devices.getBaseDeviceSize() + status.BaseDeviceFS = devices.getBaseDeviceFS() + + totalSizeInSectors, _, dataUsed, dataTotal, metadataUsed, metadataTotal, err := devices.poolStatus() + if err == nil { + // Convert from blocks to bytes + blockSizeInSectors := totalSizeInSectors / dataTotal + + status.Data.Used = dataUsed * blockSizeInSectors * 512 + status.Data.Total = dataTotal * blockSizeInSectors * 512 + status.Data.Available = status.Data.Total - status.Data.Used + + // metadata blocks are always 4k + status.Metadata.Used = metadataUsed * 4096 + status.Metadata.Total = metadataTotal * 4096 + status.Metadata.Available = status.Metadata.Total - status.Metadata.Used + + status.SectorSize = blockSizeInSectors * 512 + + if check, _ := devices.isRealFile(devices.dataLoopFile); check { + actualSpace, err := devices.getUnderlyingAvailableSpace(devices.dataLoopFile) + if err == nil && actualSpace < status.Data.Available { + status.Data.Available = actualSpace + } + } + + if check, _ := devices.isRealFile(devices.metadataLoopFile); check { + actualSpace, err := devices.getUnderlyingAvailableSpace(devices.metadataLoopFile) + if err == nil && actualSpace < status.Metadata.Available { + status.Metadata.Available = actualSpace + } + } + + minFreeData := (dataTotal * uint64(devices.minFreeSpacePercent)) / 100 + status.MinFreeSpace = minFreeData * blockSizeInSectors * 512 + } + + return status +} + +// Status returns the current status of this deviceset +func (devices *DeviceSet) exportDeviceMetadata(hash string) (*deviceMetadata, error) { + info, err := devices.lookupDeviceWithLock(hash) + if err != nil { + return nil, err + } + + info.lock.Lock() + defer info.lock.Unlock() + + metadata := &deviceMetadata{info.DeviceID, info.Size, info.Name()} + return metadata, nil +} + +// NewDeviceSet creates the device set based on the options provided. 
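+// Typical usage (the options shown are illustrative; the full set of dm.*
+// options is parsed in the switch below):
+//
+//	devices, err := NewDeviceSet("/var/lib/containers/storage/devicemapper",
+//		true, []string{"dm.thinpooldev=/dev/mapper/storage-thinpool", "dm.fs=xfs"},
+//		nil, nil)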
+func NewDeviceSet(root string, doInit bool, options []string, uidMaps, gidMaps []idtools.IDMap) (*DeviceSet, error) { + devicemapper.SetDevDir("/dev") + + devices := &DeviceSet{ + root: root, + metaData: metaData{Devices: make(map[string]*devInfo)}, + dataLoopbackSize: defaultDataLoopbackSize, + metaDataLoopbackSize: defaultMetaDataLoopbackSize, + baseFsSize: defaultBaseFsSize, + overrideUdevSyncCheck: defaultUdevSyncOverride, + doBlkDiscard: true, + thinpBlockSize: defaultThinpBlockSize, + deviceIDMap: make([]byte, deviceIDMapSz), + deletionWorkerTicker: time.NewTicker(time.Second * 30), + uidMaps: uidMaps, + gidMaps: gidMaps, + minFreeSpacePercent: defaultMinFreeSpacePercent, + } + + version, err := devicemapper.GetDriverVersion() + if err != nil { + // Can't even get driver version, assume not supported + return nil, graphdriver.ErrNotSupported + } + + if err := determineDriverCapabilities(version); err != nil { + return nil, graphdriver.ErrNotSupported + } + + if driverDeferredRemovalSupport && devicemapper.LibraryDeferredRemovalSupport { + // enable deferred stuff by default + enableDeferredDeletion = true + enableDeferredRemoval = true + } + + foundBlkDiscard := false + var lvmSetupConfig directLVMConfig + testMode := false + for _, option := range options { + key, val, err := parsers.ParseKeyValueOpt(option) + if err != nil { + return nil, err + } + key = strings.ToLower(key) + switch key { + case "dm.basesize": + size, err := units.RAMInBytes(val) + if err != nil { + return nil, err + } + userBaseSize = true + devices.baseFsSize = uint64(size) + case "dm.loopdatasize": + size, err := units.RAMInBytes(val) + if err != nil { + return nil, err + } + devices.dataLoopbackSize = size + case "dm.loopmetadatasize": + size, err := units.RAMInBytes(val) + if err != nil { + return nil, err + } + devices.metaDataLoopbackSize = size + case "dm.fs": + if val != "ext4" && val != "xfs" { + return nil, fmt.Errorf("devmapper: Unsupported filesystem %s", val) + } + devices.filesystem = val + case "dm.mkfsarg": + devices.mkfsArgs = append(devices.mkfsArgs, val) + case "dm.mountopt", "devicemapper.mountopt": + devices.mountOptions = joinMountOptions(devices.mountOptions, val) + case "dm.metadatadev": + devices.metadataDevice = val + case "dm.datadev": + devices.dataDevice = val + case "dm.thinpooldev": + devices.thinPoolDevice = strings.TrimPrefix(val, "/dev/mapper/") + case "dm.blkdiscard": + foundBlkDiscard = true + devices.doBlkDiscard, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + case "dm.blocksize": + size, err := units.RAMInBytes(val) + if err != nil { + return nil, err + } + // convert to 512b sectors + devices.thinpBlockSize = uint32(size) >> 9 + case "dm.override_udev_sync_check": + devices.overrideUdevSyncCheck, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + + case "dm.use_deferred_removal": + enableDeferredRemoval, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + + case "dm.use_deferred_deletion": + enableDeferredDeletion, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + + case "dm.min_free_space": + if !strings.HasSuffix(val, "%") { + return nil, fmt.Errorf("devmapper: Option dm.min_free_space requires %% suffix") + } + + valstring := strings.TrimSuffix(val, "%") + minFreeSpacePercent, err := strconv.ParseUint(valstring, 10, 32) + if err != nil { + return nil, err + } + + if minFreeSpacePercent >= 100 { + return nil, fmt.Errorf("devmapper: Invalid value %v for option dm.min_free_space", val) + } + 
+ devices.minFreeSpacePercent = uint32(minFreeSpacePercent) + case "dm.xfs_nospace_max_retries": + _, err := strconv.ParseUint(val, 10, 64) + if err != nil { + return nil, err + } + devices.xfsNospaceRetries = val + case "dm.directlvm_device": + lvmSetupConfig.Device = val + case "dm.directlvm_device_force": + lvmSetupConfigForce, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + case "dm.thinp_percent": + per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32) + if err != nil { + return nil, errors.Wrapf(err, "could not parse `dm.thinp_percent=%s`", val) + } + if per >= 100 { + return nil, errors.New("dm.thinp_percent must be greater than 0 and less than 100") + } + lvmSetupConfig.ThinpPercent = per + case "dm.thinp_metapercent": + per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32) + if err != nil { + return nil, errors.Wrapf(err, "could not parse `dm.thinp_metapercent=%s`", val) + } + if per >= 100 { + return nil, errors.New("dm.thinp_metapercent must be greater than 0 and less than 100") + } + lvmSetupConfig.ThinpMetaPercent = per + case "dm.thinp_autoextend_percent": + per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32) + if err != nil { + return nil, errors.Wrapf(err, "could not parse `dm.thinp_autoextend_percent=%s`", val) + } + if per > 100 { + return nil, errors.New("dm.thinp_autoextend_percent must be greater than 0 and less than 100") + } + lvmSetupConfig.AutoExtendPercent = per + case "dm.thinp_autoextend_threshold": + per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32) + if err != nil { + return nil, errors.Wrapf(err, "could not parse `dm.thinp_autoextend_threshold=%s`", val) + } + if per > 100 { + return nil, errors.New("dm.thinp_autoextend_threshold must be greater than 0 and less than 100") + } + lvmSetupConfig.AutoExtendThreshold = per + case "dm.libdm_log_level": + level, err := strconv.ParseInt(val, 10, 32) + if err != nil { + return nil, errors.Wrapf(err, "could not parse `dm.libdm_log_level=%s`", val) + } + if level < devicemapper.LogLevelFatal || level > devicemapper.LogLevelDebug { + return nil, errors.Errorf("dm.libdm_log_level must be in range [%d,%d]", devicemapper.LogLevelFatal, devicemapper.LogLevelDebug) + } + // Register a new logging callback with the specified level. 
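+ // The accepted range maps to libdm's log levels; e.g. dm.libdm_log_level=7
+ // typically selects debug output (assumption based on libdm conventions).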
+ devicemapper.LogInit(devicemapper.DefaultLogger{ + Level: int(level), + }) + case "test": + testMode, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("devmapper: Unknown option %s", key) + } + } + + if !testMode { + if err := validateLVMConfig(lvmSetupConfig); err != nil { + return nil, err + } + } + + devices.lvmSetupConfig = lvmSetupConfig + + // By default, don't do blk discard hack on raw devices, its rarely useful and is expensive + if !foundBlkDiscard && (devices.dataDevice != "" || devices.thinPoolDevice != "") { + devices.doBlkDiscard = false + } + + if err := devices.initDevmapper(doInit); err != nil { + return nil, err + } + + return devices, nil +} diff --git a/vendor/github.com/containers/storage/drivers/devmapper/devmapper_doc.go b/vendor/github.com/containers/storage/drivers/devmapper/devmapper_doc.go new file mode 100644 index 0000000..9ab3e4f --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/devmapper/devmapper_doc.go @@ -0,0 +1,106 @@ +package devmapper + +// Definition of struct dm_task and sub structures (from lvm2) +// +// struct dm_ioctl { +// /* +// * The version number is made up of three parts: +// * major - no backward or forward compatibility, +// * minor - only backwards compatible, +// * patch - both backwards and forwards compatible. +// * +// * All clients of the ioctl interface should fill in the +// * version number of the interface that they were +// * compiled with. +// * +// * All recognized ioctl commands (ie. those that don't +// * return -ENOTTY) fill out this field, even if the +// * command failed. +// */ +// uint32_t version[3]; /* in/out */ +// uint32_t data_size; /* total size of data passed in +// * including this struct */ + +// uint32_t data_start; /* offset to start of data +// * relative to start of this struct */ + +// uint32_t target_count; /* in/out */ +// int32_t open_count; /* out */ +// uint32_t flags; /* in/out */ + +// /* +// * event_nr holds either the event number (input and output) or the +// * udev cookie value (input only). +// * The DM_DEV_WAIT ioctl takes an event number as input. +// * The DM_SUSPEND, DM_DEV_REMOVE and DM_DEV_RENAME ioctls +// * use the field as a cookie to return in the DM_COOKIE +// * variable with the uevents they issue. +// * For output, the ioctls return the event number, not the cookie. 
+// */ +// uint32_t event_nr; /* in/out */ +// uint32_t padding; + +// uint64_t dev; /* in/out */ + +// char name[DM_NAME_LEN]; /* device name */ +// char uuid[DM_UUID_LEN]; /* unique identifier for +// * the block device */ +// char data[7]; /* padding or data */ +// }; + +// struct target { +// uint64_t start; +// uint64_t length; +// char *type; +// char *params; + +// struct target *next; +// }; + +// typedef enum { +// DM_ADD_NODE_ON_RESUME, /* add /dev/mapper node with dmsetup resume */ +// DM_ADD_NODE_ON_CREATE /* add /dev/mapper node with dmsetup create */ +// } dm_add_node_t; + +// struct dm_task { +// int type; +// char *dev_name; +// char *mangled_dev_name; + +// struct target *head, *tail; + +// int read_only; +// uint32_t event_nr; +// int major; +// int minor; +// int allow_default_major_fallback; +// uid_t uid; +// gid_t gid; +// mode_t mode; +// uint32_t read_ahead; +// uint32_t read_ahead_flags; +// union { +// struct dm_ioctl *v4; +// } dmi; +// char *newname; +// char *message; +// char *geometry; +// uint64_t sector; +// int no_flush; +// int no_open_count; +// int skip_lockfs; +// int query_inactive_table; +// int suppress_identical_reload; +// dm_add_node_t add_node; +// uint64_t existing_table_size; +// int cookie_set; +// int new_uuid; +// int secure_data; +// int retry_remove; +// int enable_checks; +// int expected_errno; + +// char *uuid; +// char *mangled_uuid; +// }; +// diff --git a/vendor/github.com/containers/storage/drivers/devmapper/driver.go b/vendor/github.com/containers/storage/drivers/devmapper/driver.go new file mode 100644 index 0000000..13677c9 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/devmapper/driver.go @@ -0,0 +1,244 @@ +// +build linux + +package devmapper + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "strconv" + + "github.com/containers/storage/drivers" + "github.com/containers/storage/pkg/devicemapper" + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/locker" + "github.com/containers/storage/pkg/mount" + "github.com/containers/storage/pkg/system" + units "github.com/docker/go-units" + "github.com/sirupsen/logrus" +) + +func init() { + graphdriver.Register("devicemapper", Init) +} + +// Driver contains the device set mounted and the home directory +type Driver struct { + *DeviceSet + home string + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap + ctr *graphdriver.RefCounter + locker *locker.Locker +} + +// Init creates a driver with the given home and the set of options. +func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { + deviceSet, err := NewDeviceSet(home, true, options, uidMaps, gidMaps) + if err != nil { + return nil, err + } + + if err := mount.MakePrivate(home); err != nil { + return nil, err + } + + d := &Driver{ + DeviceSet: deviceSet, + home: home, + uidMaps: uidMaps, + gidMaps: gidMaps, + ctr: graphdriver.NewRefCounter(graphdriver.NewDefaultChecker()), + locker: locker.New(), + } + + return graphdriver.NewNaiveDiffDriver(d, graphdriver.NewNaiveLayerIDMapUpdater(d)), nil +} + +func (d *Driver) String() string { + return "devicemapper" +} + +// Status returns the status about the driver in a printable format. +// Information returned contains Pool Name, Data File, Metadata file, disk usage by +// the data and metadata, etc. 
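+// Each entry is a {name, value} string pair, e.g. {"Data Space Used", "154.2 MB"}
+// (value illustrative).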
+func (d *Driver) Status() [][2]string { + s := d.DeviceSet.Status() + + status := [][2]string{ + {"Pool Name", s.PoolName}, + {"Pool Blocksize", units.HumanSize(float64(s.SectorSize))}, + {"Base Device Size", units.HumanSize(float64(s.BaseDeviceSize))}, + {"Backing Filesystem", s.BaseDeviceFS}, + {"Data file", s.DataFile}, + {"Metadata file", s.MetadataFile}, + {"Data Space Used", units.HumanSize(float64(s.Data.Used))}, + {"Data Space Total", units.HumanSize(float64(s.Data.Total))}, + {"Data Space Available", units.HumanSize(float64(s.Data.Available))}, + {"Metadata Space Used", units.HumanSize(float64(s.Metadata.Used))}, + {"Metadata Space Total", units.HumanSize(float64(s.Metadata.Total))}, + {"Metadata Space Available", units.HumanSize(float64(s.Metadata.Available))}, + {"Thin Pool Minimum Free Space", units.HumanSize(float64(s.MinFreeSpace))}, + {"Udev Sync Supported", fmt.Sprintf("%v", s.UdevSyncSupported)}, + {"Deferred Removal Enabled", fmt.Sprintf("%v", s.DeferredRemoveEnabled)}, + {"Deferred Deletion Enabled", fmt.Sprintf("%v", s.DeferredDeleteEnabled)}, + {"Deferred Deleted Device Count", fmt.Sprintf("%v", s.DeferredDeletedDeviceCount)}, + } + if len(s.DataLoopback) > 0 { + status = append(status, [2]string{"Data loop file", s.DataLoopback}) + } + if len(s.MetadataLoopback) > 0 { + status = append(status, [2]string{"Metadata loop file", s.MetadataLoopback}) + } + if vStr, err := devicemapper.GetLibraryVersion(); err == nil { + status = append(status, [2]string{"Library Version", vStr}) + } + return status +} + +// Metadata returns a map of information about the device. +func (d *Driver) Metadata(id string) (map[string]string, error) { + m, err := d.DeviceSet.exportDeviceMetadata(id) + + if err != nil { + return nil, err + } + + metadata := make(map[string]string) + metadata["DeviceId"] = strconv.Itoa(m.deviceID) + metadata["DeviceSize"] = strconv.FormatUint(m.deviceSize, 10) + metadata["DeviceName"] = m.deviceName + return metadata, nil +} + +// Cleanup unmounts a device. +func (d *Driver) Cleanup() error { + err := d.DeviceSet.Shutdown(d.home) + + if err2 := mount.Unmount(d.home); err == nil { + err = err2 + } + + return err +} + +// CreateFromTemplate creates a layer with the same contents and parent as another layer. +func (d *Driver) CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *graphdriver.CreateOpts, readWrite bool) error { + return d.Create(id, template, opts) +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. +func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + return d.Create(id, parent, opts) +} + +// Create adds a device with a given id and the parent. +func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { + var storageOpt map[string]string + if opts != nil { + storageOpt = opts.StorageOpt + } + + if err := d.DeviceSet.AddDevice(id, parent, storageOpt); err != nil { + return err + } + + return nil +} + +// Remove removes a device with a given id, unmounts the filesystem. 
+func (d *Driver) Remove(id string) error { + d.locker.Lock(id) + defer d.locker.Unlock(id) + if !d.DeviceSet.HasDevice(id) { + // Consider removing a non-existing device a no-op + // This is useful to be able to progress on container removal + // if the underlying device has gone away due to earlier errors + return nil + } + + // This assumes the device has been properly Get/Put:ed and thus is unmounted + if err := d.DeviceSet.DeleteDevice(id, false); err != nil { + return fmt.Errorf("failed to remove device %s: %v", id, err) + } + return system.EnsureRemoveAll(path.Join(d.home, "mnt", id)) +} + +// Get mounts a device with given id into the root filesystem +func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) { + d.locker.Lock(id) + defer d.locker.Unlock(id) + mp := path.Join(d.home, "mnt", id) + rootFs := path.Join(mp, "rootfs") + if count := d.ctr.Increment(mp); count > 1 { + return rootFs, nil + } + + uid, gid, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + d.ctr.Decrement(mp) + return "", err + } + + // Create the target directories if they don't exist + if err := idtools.MkdirAllAs(path.Join(d.home, "mnt"), 0755, uid, gid); err != nil && !os.IsExist(err) { + d.ctr.Decrement(mp) + return "", err + } + if err := idtools.MkdirAs(mp, 0755, uid, gid); err != nil && !os.IsExist(err) { + d.ctr.Decrement(mp) + return "", err + } + + // Mount the device + if err := d.DeviceSet.MountDevice(id, mp, options); err != nil { + d.ctr.Decrement(mp) + return "", err + } + + if err := idtools.MkdirAllAs(rootFs, 0755, uid, gid); err != nil && !os.IsExist(err) { + d.ctr.Decrement(mp) + d.DeviceSet.UnmountDevice(id, mp) + return "", err + } + + idFile := path.Join(mp, "id") + if _, err := os.Stat(idFile); err != nil && os.IsNotExist(err) { + // Create an "id" file with the container/image id in it to help reconstruct this in case + // of later problems + if err := ioutil.WriteFile(idFile, []byte(id), 0600); err != nil { + d.ctr.Decrement(mp) + d.DeviceSet.UnmountDevice(id, mp) + return "", err + } + } + + return rootFs, nil +} + +// Put unmounts a device and removes it. +func (d *Driver) Put(id string) error { + d.locker.Lock(id) + defer d.locker.Unlock(id) + mp := path.Join(d.home, "mnt", id) + if count := d.ctr.Decrement(mp); count > 0 { + return nil + } + err := d.DeviceSet.UnmountDevice(id, mp) + if err != nil { + logrus.Errorf("devmapper: Error unmounting device %s: %s", id, err) + } + return err +} + +// Exists checks to see if the device exists. +func (d *Driver) Exists(id string) bool { + return d.DeviceSet.HasDevice(id) +} + +// AdditionalImageStores returns additional image stores supported by the driver +func (d *Driver) AdditionalImageStores() []string { + return nil +} diff --git a/vendor/github.com/containers/storage/drivers/devmapper/mount.go b/vendor/github.com/containers/storage/drivers/devmapper/mount.go new file mode 100644 index 0000000..1dc3262 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/devmapper/mount.go @@ -0,0 +1,88 @@ +// +build linux + +package devmapper + +import ( + "bytes" + "fmt" + "os" + "path/filepath" + + "golang.org/x/sys/unix" +) + +// FIXME: this is copy-pasted from the aufs driver. +// It should be moved into the core. + +// Mounted returns true if a mount point exists. 
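+// It works by comparing the device number (st_dev) of the mountpoint with
+// that of its parent directory; the two differ exactly when a filesystem is
+// mounted at the path. (A bind mount from the same filesystem is a known
+// blind spot of this check.)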
+func Mounted(mountpoint string) (bool, error) {
+	var mntpointSt unix.Stat_t
+	if err := unix.Stat(mountpoint, &mntpointSt); err != nil {
+		if os.IsNotExist(err) {
+			return false, nil
+		}
+		return false, err
+	}
+	var parentSt unix.Stat_t
+	if err := unix.Stat(filepath.Join(mountpoint, ".."), &parentSt); err != nil {
+		return false, err
+	}
+	return mntpointSt.Dev != parentSt.Dev, nil
+}
+
+type probeData struct {
+	fsName string
+	magic  string
+	offset uint64
+}
+
+// ProbeFsType returns the filesystem name for the given device id.
+func ProbeFsType(device string) (string, error) {
+	probes := []probeData{
+		{"btrfs", "_BHRfS_M", 0x10040},
+		{"ext4", "\123\357", 0x438},
+		{"xfs", "XFSB", 0},
+	}
+
+	maxLen := uint64(0)
+	for _, p := range probes {
+		l := p.offset + uint64(len(p.magic))
+		if l > maxLen {
+			maxLen = l
+		}
+	}
+
+	file, err := os.Open(device)
+	if err != nil {
+		return "", err
+	}
+	defer file.Close()
+
+	buffer := make([]byte, maxLen)
+	l, err := file.Read(buffer)
+	if err != nil {
+		return "", err
+	}
+
+	if uint64(l) != maxLen {
+		return "", fmt.Errorf("devmapper: unable to detect filesystem type of %s, short read", device)
+	}
+
+	for _, p := range probes {
+		if bytes.Equal([]byte(p.magic), buffer[p.offset:p.offset+uint64(len(p.magic))]) {
+			return p.fsName, nil
+		}
+	}
+
+	return "", fmt.Errorf("devmapper: Unknown filesystem type on %s", device)
+}
+
+func joinMountOptions(a, b string) string {
+	if a == "" {
+		return b
+	}
+	if b == "" {
+		return a
+	}
+	return a + "," + b
+}
diff --git a/vendor/github.com/containers/storage/drivers/driver.go b/vendor/github.com/containers/storage/drivers/driver.go
new file mode 100644
index 0000000..e8f8bd5
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/driver.go
@@ -0,0 +1,318 @@
+package graphdriver
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+	"github.com/vbatts/tar-split/tar/storage"
+
+	"github.com/containers/storage/pkg/archive"
+	"github.com/containers/storage/pkg/idtools"
+)
+
+// FsMagic is the unsigned id of the filesystem in use.
+type FsMagic uint32
+
+const (
+	// FsMagicUnsupported is a predefined constant value other than a valid filesystem id.
+	FsMagicUnsupported = FsMagic(0x00000000)
+)
+
+var (
+	// All registered drivers
+	drivers map[string]InitFunc
+
+	// ErrNotSupported returned when driver is not supported.
+	ErrNotSupported = errors.New("driver not supported")
+	// ErrPrerequisites returned when driver does not meet prerequisites.
+	ErrPrerequisites = errors.New("prerequisites for driver not satisfied (wrong filesystem?)")
+	// ErrIncompatibleFS returned when file system is not supported.
+	ErrIncompatibleFS = fmt.Errorf("backing file system is unsupported for this graph driver")
+)
+
+// CreateOpts contains optional arguments for the Create() and CreateReadWrite()
+// methods.
+type CreateOpts struct {
+	MountLabel string
+	StorageOpt map[string]string
+	*idtools.IDMappings
+}
+
+// MountOpts contains optional arguments for the LayerStore.Mount() methods.
+type MountOpts struct {
+	// MountLabel is the MAC label to assign to the mount point (SELinux)
+	MountLabel string
+	// UidMaps & GidMaps are the User Namespace mappings to be assigned to content in the mount point
+	UidMaps []idtools.IDMap
+	GidMaps []idtools.IDMap
+	Options []string
+}
+
+// InitFunc initializes the storage driver.
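+// Driver packages are expected to register an InitFunc under a driver name;
+// the overlay backend later in this change does exactly that:
+//
+//	func init() {
+//		graphdriver.Register("overlay", Init)
+//	}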
+type InitFunc func(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) + +// ProtoDriver defines the basic capabilities of a driver. +// This interface exists solely to be a minimum set of methods +// for client code which choose not to implement the entire Driver +// interface and use the NaiveDiffDriver wrapper constructor. +// +// Use of ProtoDriver directly by client code is not recommended. +type ProtoDriver interface { + // String returns a string representation of this driver. + String() string + // CreateReadWrite creates a new, empty filesystem layer that is ready + // to be used as the storage for a container. Additional options can + // be passed in opts. parent may be "" and opts may be nil. + CreateReadWrite(id, parent string, opts *CreateOpts) error + // Create creates a new, empty, filesystem layer with the + // specified id and parent and options passed in opts. Parent + // may be "" and opts may be nil. + Create(id, parent string, opts *CreateOpts) error + // CreateFromTemplate creates a new filesystem layer with the specified id + // and parent, with contents identical to the specified template layer. + CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *CreateOpts, readWrite bool) error + // Remove attempts to remove the filesystem layer with this id. + Remove(id string) error + // Get returns the mountpoint for the layered filesystem referred + // to by this id. You can optionally specify a mountLabel or "". + // Optionally it gets the mappings used to create the layer. + // Returns the absolute path to the mounted layered filesystem. + Get(id string, options MountOpts) (dir string, err error) + // Put releases the system resources for the specified id, + // e.g, unmounting layered filesystem. + Put(id string) error + // Exists returns whether a filesystem layer with the specified + // ID exists on this driver. + Exists(id string) bool + // Status returns a set of key-value pairs which give low + // level diagnostic status about this driver. + Status() [][2]string + // Returns a set of key-value pairs which give low level information + // about the image/container driver is managing. + Metadata(id string) (map[string]string, error) + // Cleanup performs necessary tasks to release resources + // held by the driver, e.g., unmounting all layered filesystems + // known to this driver. + Cleanup() error + // AdditionalImageStores returns additional image stores supported by the driver + AdditionalImageStores() []string +} + +// DiffDriver is the interface to use to implement graph diffs +type DiffDriver interface { + // Diff produces an archive of the changes between the specified + // layer and its parent layer which may be "". + Diff(id string, idMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, mountLabel string) (io.ReadCloser, error) + // Changes produces a list of changes between the specified layer + // and its parent layer. If parent is "", then all changes will be ADD changes. + Changes(id string, idMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, mountLabel string) ([]archive.Change, error) + // ApplyDiff extracts the changeset from the given diff into the + // layer with the specified id and parent, returning the size of the + // new layer in bytes. + // The io.Reader must be an uncompressed stream. 
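+	// Callers holding a compressed layer stream are expected to decompress
+	// it first (for example with the archive package's DecompressStream
+	// helper) before passing it here.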
+ ApplyDiff(id string, idMappings *idtools.IDMappings, parent string, mountLabel string, diff io.Reader) (size int64, err error) + // DiffSize calculates the changes between the specified id + // and its parent and returns the size in bytes of the changes + // relative to its base filesystem directory. + DiffSize(id string, idMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, mountLabel string) (size int64, err error) +} + +// LayerIDMapUpdater is the interface that implements ID map changes for layers. +type LayerIDMapUpdater interface { + // UpdateLayerIDMap walks the layer's filesystem tree, changing the ownership + // information using the toContainer and toHost mappings, using them to replace + // on-disk owner UIDs and GIDs which are "host" values in the first map with + // UIDs and GIDs for "host" values from the second map which correspond to the + // same "container" IDs. This method should only be called after a layer is + // first created and populated, and before it is mounted, as other changes made + // relative to a parent layer, but before this method is called, may be discarded + // by Diff(). + UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) error + + // SupportsShifting tells whether the driver support shifting of the UIDs/GIDs in a + // image and it is not required to Chown the files when running in an user namespace. + SupportsShifting() bool +} + +// Driver is the interface for layered/snapshot file system drivers. +type Driver interface { + ProtoDriver + DiffDriver + LayerIDMapUpdater +} + +// Capabilities defines a list of capabilities a driver may implement. +// These capabilities are not required; however, they do determine how a +// graphdriver can be used. +type Capabilities struct { + // Flags that this driver is capable of reproducing exactly equivalent + // diffs for read-only layers. If set, clients can rely on the driver + // for consistent tar streams, and avoid extra processing to account + // for potential differences (eg: the layer store's use of tar-split). + ReproducesExactDiffs bool +} + +// CapabilityDriver is the interface for layered file system drivers that +// can report on their Capabilities. +type CapabilityDriver interface { + Capabilities() Capabilities +} + +// DiffGetterDriver is the interface for layered file system drivers that +// provide a specialized function for getting file contents for tar-split. +type DiffGetterDriver interface { + Driver + // DiffGetter returns an interface to efficiently retrieve the contents + // of files in a layer. + DiffGetter(id string) (FileGetCloser, error) +} + +// FileGetCloser extends the storage.FileGetter interface with a Close method +// for cleaning up. +type FileGetCloser interface { + storage.FileGetter + // Close cleans up any resources associated with the FileGetCloser. + Close() error +} + +// Checker makes checks on specified filesystems. +type Checker interface { + // IsMounted returns true if the provided path is mounted for the specific checker + IsMounted(path string) bool +} + +func init() { + drivers = make(map[string]InitFunc) +} + +// Register registers an InitFunc for the driver. 
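+// Registration is expected to happen from package init functions (as the
+// overlay driver below does), so the drivers map is fully populated before
+// any concurrent lookups can occur.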
+func Register(name string, initFunc InitFunc) error { + if _, exists := drivers[name]; exists { + return fmt.Errorf("Name already registered %s", name) + } + drivers[name] = initFunc + + return nil +} + +// GetDriver initializes and returns the registered driver +func GetDriver(name string, config Options) (Driver, error) { + if initFunc, exists := drivers[name]; exists { + return initFunc(filepath.Join(config.Root, name), config.DriverOptions, config.UIDMaps, config.GIDMaps) + } + + logrus.Errorf("Failed to GetDriver graph %s %s", name, config.Root) + return nil, errors.Wrapf(ErrNotSupported, "failed to GetDriver graph %s %s", name, config.Root) +} + +// getBuiltinDriver initializes and returns the registered driver, but does not try to load from plugins +func getBuiltinDriver(name, home string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) { + if initFunc, exists := drivers[name]; exists { + return initFunc(filepath.Join(home, name), options, uidMaps, gidMaps) + } + logrus.Errorf("Failed to built-in GetDriver graph %s %s", name, home) + return nil, errors.Wrapf(ErrNotSupported, "failed to built-in GetDriver graph %s %s", name, home) +} + +// Options is used to initialize a graphdriver +type Options struct { + Root string + DriverOptions []string + UIDMaps []idtools.IDMap + GIDMaps []idtools.IDMap + ExperimentalEnabled bool +} + +// New creates the driver and initializes it at the specified root. +func New(name string, config Options) (Driver, error) { + if name != "" { + logrus.Debugf("[graphdriver] trying provided driver %q", name) // so the logs show specified driver + return GetDriver(name, config) + } + + // Guess for prior driver + driversMap := scanPriorDrivers(config.Root) + for _, name := range priority { + if name == "vfs" { + // don't use vfs even if there is state present. + continue + } + if _, prior := driversMap[name]; prior { + // of the state found from prior drivers, check in order of our priority + // which we would prefer + driver, err := getBuiltinDriver(name, config.Root, config.DriverOptions, config.UIDMaps, config.GIDMaps) + if err != nil { + // unlike below, we will return error here, because there is prior + // state, and now it is no longer supported/prereq/compatible, so + // something changed and needs attention. Otherwise the daemon's + // images would just "disappear". 
+				logrus.Errorf("[graphdriver] prior storage driver %s failed: %s", name, err)
+				return nil, err
+			}
+
+			// abort starting when there are other prior configured drivers
+			// to ensure the user explicitly selects the driver to load
+			if len(driversMap)-1 > 0 {
+				var driversSlice []string
+				for name := range driversMap {
+					driversSlice = append(driversSlice, name)
+				}
+
+				return nil, fmt.Errorf("%s contains several valid graphdrivers: %s; Please clean up or explicitly choose storage driver (-s <DRIVER>)", config.Root, strings.Join(driversSlice, ", "))
+			}
+
+			logrus.Infof("[graphdriver] using prior storage driver: %s", name)
+			return driver, nil
+		}
+	}
+
+	// Check for priority drivers first
+	for _, name := range priority {
+		driver, err := getBuiltinDriver(name, config.Root, config.DriverOptions, config.UIDMaps, config.GIDMaps)
+		if err != nil {
+			if isDriverNotSupported(err) {
+				continue
+			}
+			return nil, err
+		}
+		return driver, nil
+	}
+
+	// Check all registered drivers if no priority driver is found
+	for name, initFunc := range drivers {
+		driver, err := initFunc(filepath.Join(config.Root, name), config.DriverOptions, config.UIDMaps, config.GIDMaps)
+		if err != nil {
+			if isDriverNotSupported(err) {
+				continue
+			}
+			return nil, err
+		}
+		return driver, nil
+	}
+	return nil, fmt.Errorf("No supported storage backend found")
+}
+
+// isDriverNotSupported returns true if the error initializing
+// the graph driver is a non-supported error.
+func isDriverNotSupported(err error) bool {
+	cause := errors.Cause(err)
+	return cause == ErrNotSupported || cause == ErrPrerequisites || cause == ErrIncompatibleFS
+}
+
+// scanPriorDrivers returns an un-ordered scan of directories of prior storage drivers
+func scanPriorDrivers(root string) map[string]bool {
+	driversMap := make(map[string]bool)
+
+	for driver := range drivers {
+		p := filepath.Join(root, driver)
+		if _, err := os.Stat(p); err == nil && driver != "vfs" {
+			driversMap[driver] = true
+		}
+	}
+	return driversMap
+}
diff --git a/vendor/github.com/containers/storage/drivers/driver_freebsd.go b/vendor/github.com/containers/storage/drivers/driver_freebsd.go
new file mode 100644
index 0000000..53394b7
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/driver_freebsd.go
@@ -0,0 +1,23 @@
+package graphdriver
+
+import (
+	"syscall"
+
+	"golang.org/x/sys/unix"
+)
+
+var (
+	// Slice of drivers that should be used in an order
+	priority = []string{
+		"zfs",
+	}
+)
+
+// Mounted checks if the given path is mounted as the fs type
+func Mounted(fsType FsMagic, mountPath string) (bool, error) {
+	var buf unix.Statfs_t
+	if err := syscall.Statfs(mountPath, &buf); err != nil {
+		return false, err
+	}
+	return FsMagic(buf.Type) == fsType, nil
+}
diff --git a/vendor/github.com/containers/storage/drivers/driver_linux.go b/vendor/github.com/containers/storage/drivers/driver_linux.go
new file mode 100644
index 0000000..a45f6b4
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/driver_linux.go
@@ -0,0 +1,136 @@
+// +build linux
+
+package graphdriver
+
+import (
+	"path/filepath"
+
+	"github.com/containers/storage/pkg/mount"
+	"golang.org/x/sys/unix"
+)
+
+const (
+	// FsMagicAufs filesystem id for Aufs
+	FsMagicAufs = FsMagic(0x61756673)
+	// FsMagicBtrfs filesystem id for Btrfs
+	FsMagicBtrfs = FsMagic(0x9123683E)
+	// FsMagicCramfs filesystem id for Cramfs
+	FsMagicCramfs = FsMagic(0x28cd3d45)
+	// FsMagicEcryptfs filesystem id for eCryptfs
+	FsMagicEcryptfs = FsMagic(0xf15f)
+	// FsMagicExtfs filesystem id for Extfs
+	FsMagicExtfs = FsMagic(0x0000EF53)
+	// FsMagicF2fs filesystem id for F2fs
+	FsMagicF2fs = FsMagic(0xF2F52010)
+	// FsMagicGPFS filesystem id for GPFS
+	FsMagicGPFS = FsMagic(0x47504653)
+	// FsMagicJffs2Fs filesystem id for Jffs2Fs
+	FsMagicJffs2Fs = FsMagic(0x000072b6)
+	// FsMagicJfs filesystem id for Jfs
+	FsMagicJfs = FsMagic(0x3153464a)
+	// FsMagicNfsFs filesystem id for NfsFs
+	FsMagicNfsFs = FsMagic(0x00006969)
+	// FsMagicRAMFs filesystem id for RamFs
+	FsMagicRAMFs = FsMagic(0x858458f6)
+	// FsMagicReiserFs filesystem id for ReiserFs
+	FsMagicReiserFs = FsMagic(0x52654973)
+	// FsMagicSmbFs filesystem id for SmbFs
+	FsMagicSmbFs = FsMagic(0x0000517B)
+	// FsMagicSquashFs filesystem id for SquashFs
+	FsMagicSquashFs = FsMagic(0x73717368)
+	// FsMagicTmpFs filesystem id for TmpFs
+	FsMagicTmpFs = FsMagic(0x01021994)
+	// FsMagicVxFS filesystem id for VxFs
+	FsMagicVxFS = FsMagic(0xa501fcf5)
+	// FsMagicXfs filesystem id for Xfs
+	FsMagicXfs = FsMagic(0x58465342)
+	// FsMagicZfs filesystem id for Zfs
+	FsMagicZfs = FsMagic(0x2fc12fc1)
+	// FsMagicOverlay filesystem id for overlay
+	FsMagicOverlay = FsMagic(0x794C7630)
+)
+
+var (
+	// Slice of drivers that should be used in an order
+	priority = []string{
+		"overlay",
+		// We don't support devicemapper without configuration
+		// "devicemapper",
+		"aufs",
+		"btrfs",
+		"zfs",
+		"vfs",
+	}
+
+	// FsNames maps filesystem id to name of the filesystem.
+	FsNames = map[FsMagic]string{
+		FsMagicAufs:        "aufs",
+		FsMagicBtrfs:       "btrfs",
+		FsMagicCramfs:      "cramfs",
+		FsMagicEcryptfs:    "ecryptfs",
+		FsMagicExtfs:       "extfs",
+		FsMagicF2fs:        "f2fs",
+		FsMagicGPFS:        "gpfs",
+		FsMagicJffs2Fs:     "jffs2",
+		FsMagicJfs:         "jfs",
+		FsMagicNfsFs:       "nfs",
+		FsMagicOverlay:     "overlayfs",
+		FsMagicRAMFs:       "ramfs",
+		FsMagicReiserFs:    "reiserfs",
+		FsMagicSmbFs:       "smb",
+		FsMagicSquashFs:    "squashfs",
+		FsMagicTmpFs:       "tmpfs",
+		FsMagicUnsupported: "unsupported",
+		FsMagicVxFS:        "vxfs",
+		FsMagicXfs:         "xfs",
+		FsMagicZfs:         "zfs",
+	}
+)
+
+// GetFSMagic returns the filesystem id given the path.
+func GetFSMagic(rootpath string) (FsMagic, error) {
+	var buf unix.Statfs_t
+	if err := unix.Statfs(filepath.Dir(rootpath), &buf); err != nil {
+		return 0, err
+	}
+	return FsMagic(buf.Type), nil
+}
+
+// NewFsChecker returns a checker configured for the provided FsMagic
+func NewFsChecker(t FsMagic) Checker {
+	return &fsChecker{
+		t: t,
+	}
+}
+
+type fsChecker struct {
+	t FsMagic
+}
+
+func (c *fsChecker) IsMounted(path string) bool {
+	m, _ := Mounted(c.t, path)
+	return m
+}
+
+// NewDefaultChecker returns a checker that parses /proc/mountinfo to check
+// if the specified path is mounted.
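+// A typical use, with a hypothetical path:
+//
+//	checker := graphdriver.NewDefaultChecker()
+//	if checker.IsMounted("/var/lib/containers/storage/overlay") {
+//		// already mounted, nothing to do
+//	}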
+func NewDefaultChecker() Checker {
+	return &defaultChecker{}
+}
+
+type defaultChecker struct {
+}
+
+func (c *defaultChecker) IsMounted(path string) bool {
+	m, _ := mount.Mounted(path)
+	return m
+}
+
+// Mounted checks if the given path is mounted as the fs type
+func Mounted(fsType FsMagic, mountPath string) (bool, error) {
+	var buf unix.Statfs_t
+	if err := unix.Statfs(mountPath, &buf); err != nil {
+		return false, err
+	}
+	return FsMagic(buf.Type) == fsType, nil
+}
diff --git a/vendor/github.com/containers/storage/drivers/driver_solaris.go b/vendor/github.com/containers/storage/drivers/driver_solaris.go
new file mode 100644
index 0000000..174fa96
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/driver_solaris.go
@@ -0,0 +1,96 @@
+// +build solaris,cgo
+
+package graphdriver
+
+/*
+#include <sys/statvfs.h>
+#include <stdlib.h>
+
+static inline struct statvfs *getstatfs(char *s) {
+	struct statvfs *buf;
+	int err;
+	buf = (struct statvfs *)malloc(sizeof(struct statvfs));
+	err = statvfs(s, buf);
+	return buf;
+}
+*/
+import "C"
+import (
+	"path/filepath"
+	"unsafe"
+
+	"github.com/containers/storage/pkg/mount"
+	"github.com/sirupsen/logrus"
+)
+
+const (
+	// FsMagicZfs filesystem id for Zfs
+	FsMagicZfs = FsMagic(0x2fc12fc1)
+)
+
+var (
+	// Slice of drivers that should be used in an order
+	priority = []string{
+		"zfs",
+	}
+
+	// FsNames maps filesystem id to name of the filesystem.
+	FsNames = map[FsMagic]string{
+		FsMagicZfs: "zfs",
+	}
+)
+
+// GetFSMagic returns the filesystem id given the path.
+func GetFSMagic(rootpath string) (FsMagic, error) {
+	return 0, nil
+}
+
+type fsChecker struct {
+	t FsMagic
+}
+
+func (c *fsChecker) IsMounted(path string) bool {
+	m, _ := Mounted(c.t, path)
+	return m
+}
+
+// NewFsChecker returns a checker configured for the provided FsMagic
+func NewFsChecker(t FsMagic) Checker {
+	return &fsChecker{
+		t: t,
+	}
+}
+
+// NewDefaultChecker returns a checker that parses /proc/mountinfo to check
+// if the specified path is mounted.
+// No-op on Solaris.
+func NewDefaultChecker() Checker {
+	return &defaultChecker{}
+}
+
+type defaultChecker struct {
+}
+
+func (c *defaultChecker) IsMounted(path string) bool {
+	m, _ := mount.Mounted(path)
+	return m
+}
+
+// Mounted checks if the given path is mounted as the fs type.
+// Solaris supports only ZFS for now.
+func Mounted(fsType FsMagic, mountPath string) (bool, error) {
+
+	cs := C.CString(filepath.Dir(mountPath))
+	defer C.free(unsafe.Pointer(cs))
+	buf := C.getstatfs(cs)
+	defer C.free(unsafe.Pointer(buf))
+
+	// on Solaris buf.f_basetype contains ['z', 'f', 's', 0 ... ]
+	if (buf.f_basetype[0] != 122) || (buf.f_basetype[1] != 102) || (buf.f_basetype[2] != 115) ||
+		(buf.f_basetype[3] != 0) {
+		logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", mountPath)
+		return false, ErrPrerequisites
+	}
+
+	return true, nil
+}
diff --git a/vendor/github.com/containers/storage/drivers/driver_unsupported.go b/vendor/github.com/containers/storage/drivers/driver_unsupported.go
new file mode 100644
index 0000000..4a87560
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/driver_unsupported.go
@@ -0,0 +1,15 @@
+// +build !linux,!windows,!freebsd,!solaris
+
+package graphdriver
+
+var (
+	// Slice of drivers that should be used in an order
+	priority = []string{
+		"unsupported",
+	}
+)
+
+// GetFSMagic returns the filesystem id given the path.
+func GetFSMagic(rootpath string) (FsMagic, error) { + return FsMagicUnsupported, nil +} diff --git a/vendor/github.com/containers/storage/drivers/driver_windows.go b/vendor/github.com/containers/storage/drivers/driver_windows.go new file mode 100644 index 0000000..ffd30c2 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/driver_windows.go @@ -0,0 +1,14 @@ +package graphdriver + +var ( + // Slice of drivers that should be used in order + priority = []string{ + "windowsfilter", + } +) + +// GetFSMagic returns the filesystem id given the path. +func GetFSMagic(rootpath string) (FsMagic, error) { + // Note it is OK to return FsMagicUnsupported on Windows. + return FsMagicUnsupported, nil +} diff --git a/vendor/github.com/containers/storage/drivers/fsdiff.go b/vendor/github.com/containers/storage/drivers/fsdiff.go new file mode 100644 index 0000000..ab2c41e --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/fsdiff.go @@ -0,0 +1,217 @@ +package graphdriver + +import ( + "io" + "time" + + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/chrootarchive" + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/ioutils" + rsystem "github.com/opencontainers/runc/libcontainer/system" + "github.com/sirupsen/logrus" +) + +var ( + // ApplyUncompressedLayer defines the unpack method used by the graph + // driver. + ApplyUncompressedLayer = chrootarchive.ApplyUncompressedLayer +) + +// NaiveDiffDriver takes a ProtoDriver and adds the +// capability of the Diffing methods which it may or may not +// support on its own. See the comment on the exported +// NewNaiveDiffDriver function below. +// Notably, the AUFS driver doesn't need to be wrapped like this. +type NaiveDiffDriver struct { + ProtoDriver + LayerIDMapUpdater +} + +// NewNaiveDiffDriver returns a fully functional driver that wraps the +// given ProtoDriver and adds the capability of the following methods which +// it may or may not support on its own: +// Diff(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (io.ReadCloser, error) +// Changes(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) ([]archive.Change, error) +// ApplyDiff(id string, idMappings *idtools.IDMappings, parent, mountLabel string, diff io.Reader) (size int64, err error) +// DiffSize(id string, idMappings *idtools.IDMappings, parent, parentMappings *idtools.IDMappings, mountLabel string) (size int64, err error) +func NewNaiveDiffDriver(driver ProtoDriver, updater LayerIDMapUpdater) Driver { + return &NaiveDiffDriver{ProtoDriver: driver, LayerIDMapUpdater: updater} +} + +// Diff produces an archive of the changes between the specified +// layer and its parent layer which may be "". 
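+// It mounts the layer (and, when parent is non-empty, the parent layer),
+// computes the change list with archive.ChangesDirs, and streams the result
+// of archive.ExportChanges back to the caller; the mounts are released when
+// the returned ReadCloser is closed.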
+func (gdw *NaiveDiffDriver) Diff(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (arch io.ReadCloser, err error) { + startTime := time.Now() + driver := gdw.ProtoDriver + + if idMappings == nil { + idMappings = &idtools.IDMappings{} + } + if parentMappings == nil { + parentMappings = &idtools.IDMappings{} + } + + options := MountOpts{ + MountLabel: mountLabel, + } + layerFs, err := driver.Get(id, options) + if err != nil { + return nil, err + } + + defer func() { + if err != nil { + driver.Put(id) + } + }() + + if parent == "" { + archive, err := archive.TarWithOptions(layerFs, &archive.TarOptions{ + Compression: archive.Uncompressed, + UIDMaps: idMappings.UIDs(), + GIDMaps: idMappings.GIDs(), + }) + if err != nil { + return nil, err + } + return ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + driver.Put(id) + return err + }), nil + } + + parentFs, err := driver.Get(parent, options) + if err != nil { + return nil, err + } + defer driver.Put(parent) + + changes, err := archive.ChangesDirs(layerFs, idMappings, parentFs, parentMappings) + if err != nil { + return nil, err + } + + archive, err := archive.ExportChanges(layerFs, changes, idMappings.UIDs(), idMappings.GIDs()) + if err != nil { + return nil, err + } + + return ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + driver.Put(id) + + // NaiveDiffDriver compares file metadata with parent layers. Parent layers + // are extracted from tar's with full second precision on modified time. + // We need this hack here to make sure calls within same second receive + // correct result. + time.Sleep(startTime.Truncate(time.Second).Add(time.Second).Sub(time.Now())) + return err + }), nil +} + +// Changes produces a list of changes between the specified layer +// and its parent layer. If parent is "", then all changes will be ADD changes. +func (gdw *NaiveDiffDriver) Changes(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) ([]archive.Change, error) { + driver := gdw.ProtoDriver + + if idMappings == nil { + idMappings = &idtools.IDMappings{} + } + if parentMappings == nil { + parentMappings = &idtools.IDMappings{} + } + + options := MountOpts{ + MountLabel: mountLabel, + } + layerFs, err := driver.Get(id, options) + if err != nil { + return nil, err + } + defer driver.Put(id) + + parentFs := "" + + if parent != "" { + options := MountOpts{ + MountLabel: mountLabel, + } + parentFs, err = driver.Get(parent, options) + if err != nil { + return nil, err + } + defer driver.Put(parent) + } + + return archive.ChangesDirs(layerFs, idMappings, parentFs, parentMappings) +} + +// ApplyDiff extracts the changeset from the given diff into the +// layer with the specified id and parent, returning the size of the +// new layer in bytes. +func (gdw *NaiveDiffDriver) ApplyDiff(id string, applyMappings *idtools.IDMappings, parent, mountLabel string, diff io.Reader) (size int64, err error) { + driver := gdw.ProtoDriver + + if applyMappings == nil { + applyMappings = &idtools.IDMappings{} + } + + // Mount the root filesystem so we can apply the diff/layer. 
+ mountOpts := MountOpts{ + MountLabel: mountLabel, + } + layerFs, err := driver.Get(id, mountOpts) + if err != nil { + return + } + defer driver.Put(id) + + options := &archive.TarOptions{ + InUserNS: rsystem.RunningInUserNS(), + } + if applyMappings != nil { + options.UIDMaps = applyMappings.UIDs() + options.GIDMaps = applyMappings.GIDs() + } + start := time.Now().UTC() + logrus.Debug("Start untar layer") + if size, err = ApplyUncompressedLayer(layerFs, diff, options); err != nil { + logrus.Errorf("Error while applying layer: %s", err) + return + } + logrus.Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds()) + + return +} + +// DiffSize calculates the changes between the specified layer +// and its parent and returns the size in bytes of the changes +// relative to its base filesystem directory. +func (gdw *NaiveDiffDriver) DiffSize(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (size int64, err error) { + driver := gdw.ProtoDriver + + if idMappings == nil { + idMappings = &idtools.IDMappings{} + } + if parentMappings == nil { + parentMappings = &idtools.IDMappings{} + } + + changes, err := gdw.Changes(id, idMappings, parent, parentMappings, mountLabel) + if err != nil { + return + } + + options := MountOpts{ + MountLabel: mountLabel, + } + layerFs, err := driver.Get(id, options) + if err != nil { + return + } + defer driver.Put(id) + + return archive.ChangesSize(layerFs, changes), nil +} diff --git a/vendor/github.com/containers/storage/drivers/overlay/check.go b/vendor/github.com/containers/storage/drivers/overlay/check.go new file mode 100644 index 0000000..a566e4a --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/overlay/check.go @@ -0,0 +1,165 @@ +// +build linux + +package overlay + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "path/filepath" + "syscall" + + "github.com/containers/storage/pkg/ioutils" + "github.com/containers/storage/pkg/mount" + "github.com/containers/storage/pkg/system" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +// doesSupportNativeDiff checks whether the filesystem has a bug +// which copies up the opaque flag when copying up an opaque +// directory or the kernel enable CONFIG_OVERLAY_FS_REDIRECT_DIR. +// When these exist naive diff should be used. 
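+// The probe works on a throwaway three-layer overlay mount: it marks a
+// middle-layer directory opaque, forces a copy-up by writing into it, and
+// then inspects the trusted.overlay.opaque and trusted.overlay.redirect
+// xattrs on the upper layer to see whether either behavior is present.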
+func doesSupportNativeDiff(d, mountOpts string) error { + td, err := ioutil.TempDir(d, "opaque-bug-check") + if err != nil { + return err + } + defer func() { + if err := os.RemoveAll(td); err != nil { + logrus.Warnf("Failed to remove check directory %v: %v", td, err) + } + }() + + // Make directories l1/d, l1/d1, l2/d, l3, work, merged + if err := os.MkdirAll(filepath.Join(td, "l1", "d"), 0755); err != nil { + return err + } + if err := os.MkdirAll(filepath.Join(td, "l1", "d1"), 0755); err != nil { + return err + } + if err := os.MkdirAll(filepath.Join(td, "l2", "d"), 0755); err != nil { + return err + } + if err := os.Mkdir(filepath.Join(td, "l3"), 0755); err != nil { + return err + } + if err := os.Mkdir(filepath.Join(td, "work"), 0755); err != nil { + return err + } + if err := os.Mkdir(filepath.Join(td, "merged"), 0755); err != nil { + return err + } + + // Mark l2/d as opaque + if err := system.Lsetxattr(filepath.Join(td, "l2", "d"), "trusted.overlay.opaque", []byte("y"), 0); err != nil { + return errors.Wrap(err, "failed to set opaque flag on middle layer") + } + + opts := fmt.Sprintf("lowerdir=%s:%s,upperdir=%s,workdir=%s", path.Join(td, "l2"), path.Join(td, "l1"), path.Join(td, "l3"), path.Join(td, "work")) + flags, data := mount.ParseOptions(mountOpts) + if data != "" { + opts = fmt.Sprintf("%s,%s", opts, data) + } + if err := unix.Mount("overlay", filepath.Join(td, "merged"), "overlay", uintptr(flags), opts); err != nil { + return errors.Wrap(err, "failed to mount overlay") + } + defer func() { + if err := unix.Unmount(filepath.Join(td, "merged"), 0); err != nil { + logrus.Warnf("Failed to unmount check directory %v: %v", filepath.Join(td, "merged"), err) + } + }() + + // Touch file in d to force copy up of opaque directory "d" from "l2" to "l3" + if err := ioutil.WriteFile(filepath.Join(td, "merged", "d", "f"), []byte{}, 0644); err != nil { + return errors.Wrap(err, "failed to write to merged directory") + } + + // Check l3/d does not have opaque flag + xattrOpaque, err := system.Lgetxattr(filepath.Join(td, "l3", "d"), "trusted.overlay.opaque") + if err != nil { + return errors.Wrap(err, "failed to read opaque flag on upper layer") + } + if string(xattrOpaque) == "y" { + return errors.New("opaque flag erroneously copied up, consider update to kernel 4.8 or later to fix") + } + + // rename "d1" to "d2" + if err := os.Rename(filepath.Join(td, "merged", "d1"), filepath.Join(td, "merged", "d2")); err != nil { + // if rename failed with syscall.EXDEV, the kernel doesn't have CONFIG_OVERLAY_FS_REDIRECT_DIR enabled + if err.(*os.LinkError).Err == syscall.EXDEV { + return nil + } + return errors.Wrap(err, "failed to rename dir in merged directory") + } + // get the xattr of "d2" + xattrRedirect, err := system.Lgetxattr(filepath.Join(td, "l3", "d2"), "trusted.overlay.redirect") + if err != nil { + return errors.Wrap(err, "failed to read redirect flag on upper layer") + } + + if string(xattrRedirect) == "d1" { + return errors.New("kernel has CONFIG_OVERLAY_FS_REDIRECT_DIR enabled") + } + + return nil +} + +// doesMetacopy checks if the filesystem is going to optimize changes to +// metadata by using nodes marked with an "overlay.metacopy" attribute to avoid +// copying up a file from a lower layer unless/until its contents are being +// modified +func doesMetacopy(d, mountOpts string) (bool, error) { + td, err := ioutil.TempDir(d, "metacopy-check") + if err != nil { + return false, err + } + defer func() { + if err := os.RemoveAll(td); err != nil { + logrus.Warnf("Failed to remove check 
directory %v: %v", td, err) + } + }() + + // Make directories l1, l2, work, merged + if err := os.MkdirAll(filepath.Join(td, "l1"), 0755); err != nil { + return false, err + } + if err := ioutils.AtomicWriteFile(filepath.Join(td, "l1", "f"), []byte{0xff}, 0700); err != nil { + return false, err + } + if err := os.MkdirAll(filepath.Join(td, "l2"), 0755); err != nil { + return false, err + } + if err := os.Mkdir(filepath.Join(td, "work"), 0755); err != nil { + return false, err + } + if err := os.Mkdir(filepath.Join(td, "merged"), 0755); err != nil { + return false, err + } + // Mount using the mandatory options and configured options + opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", path.Join(td, "l1"), path.Join(td, "l2"), path.Join(td, "work")) + flags, data := mount.ParseOptions(mountOpts) + if data != "" { + opts = fmt.Sprintf("%s,%s", opts, data) + } + if err := unix.Mount("overlay", filepath.Join(td, "merged"), "overlay", uintptr(flags), opts); err != nil { + return false, errors.Wrap(err, "failed to mount overlay for metacopy check") + } + defer func() { + if err := unix.Unmount(filepath.Join(td, "merged"), 0); err != nil { + logrus.Warnf("Failed to unmount check directory %v: %v", filepath.Join(td, "merged"), err) + } + }() + // Make a change that only impacts the inode, and check if the pulled-up copy is marked + // as a metadata-only copy + if err := os.Chmod(filepath.Join(td, "merged", "f"), 0600); err != nil { + return false, errors.Wrap(err, "error changing permissions on file for metacopy check") + } + metacopy, err := system.Lgetxattr(filepath.Join(td, "l2", "f"), "trusted.overlay.metacopy") + if err != nil { + return false, errors.Wrap(err, "metacopy flag was not set on file in upper layer") + } + return metacopy != nil, nil +} diff --git a/vendor/github.com/containers/storage/drivers/overlay/mount.go b/vendor/github.com/containers/storage/drivers/overlay/mount.go new file mode 100644 index 0000000..feb0395 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/overlay/mount.go @@ -0,0 +1,89 @@ +// +build linux + +package overlay + +import ( + "bytes" + "encoding/json" + "flag" + "fmt" + "os" + "runtime" + + "github.com/containers/storage/pkg/reexec" + "golang.org/x/sys/unix" +) + +func init() { + reexec.Register("storage-mountfrom", mountFromMain) +} + +func fatal(err error) { + fmt.Fprint(os.Stderr, err) + os.Exit(1) +} + +type mountOptions struct { + Device string + Target string + Type string + Label string + Flag uint32 +} + +func mountFrom(dir, device, target, mType string, flags uintptr, label string) error { + options := &mountOptions{ + Device: device, + Target: target, + Type: mType, + Flag: uint32(flags), + Label: label, + } + + cmd := reexec.Command("storage-mountfrom", dir) + w, err := cmd.StdinPipe() + if err != nil { + return fmt.Errorf("mountfrom error on pipe creation: %v", err) + } + + output := bytes.NewBuffer(nil) + cmd.Stdout = output + cmd.Stderr = output + if err := cmd.Start(); err != nil { + w.Close() + return fmt.Errorf("mountfrom error on re-exec cmd: %v", err) + } + //write the options to the pipe for the untar exec to read + if err := json.NewEncoder(w).Encode(options); err != nil { + w.Close() + return fmt.Errorf("mountfrom json encode to pipe failed: %v", err) + } + w.Close() + + if err := cmd.Wait(); err != nil { + return fmt.Errorf("mountfrom re-exec error: %v: output: %v", err, output) + } + return nil +} + +// mountfromMain is the entry-point for storage-mountfrom on re-exec. 
+func mountFromMain() {
+	runtime.LockOSThread()
+	flag.Parse()
+
+	var options *mountOptions
+
+	if err := json.NewDecoder(os.Stdin).Decode(&options); err != nil {
+		fatal(err)
+	}
+
+	if err := os.Chdir(flag.Arg(0)); err != nil {
+		fatal(err)
+	}
+
+	if err := unix.Mount(options.Device, options.Target, options.Type, uintptr(options.Flag), options.Label); err != nil {
+		fatal(err)
+	}
+
+	os.Exit(0)
+}
diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
new file mode 100644
index 0000000..ef83b6c
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
@@ -0,0 +1,1147 @@
+// +build linux
+
+package overlay
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"path"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"sync"
+	"syscall"
+
+	graphdriver "github.com/containers/storage/drivers"
+	"github.com/containers/storage/drivers/overlayutils"
+	"github.com/containers/storage/drivers/quota"
+	"github.com/containers/storage/pkg/archive"
+	"github.com/containers/storage/pkg/chrootarchive"
+	"github.com/containers/storage/pkg/directory"
+	"github.com/containers/storage/pkg/fsutils"
+	"github.com/containers/storage/pkg/idtools"
+	"github.com/containers/storage/pkg/locker"
+	"github.com/containers/storage/pkg/mount"
+	"github.com/containers/storage/pkg/ostree"
+	"github.com/containers/storage/pkg/parsers"
+	"github.com/containers/storage/pkg/system"
+	units "github.com/docker/go-units"
+	rsystem "github.com/opencontainers/runc/libcontainer/system"
+	"github.com/opencontainers/selinux/go-selinux/label"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+	"golang.org/x/sys/unix"
+)
+
+var (
+	// untar defines the untar method
+	untar = chrootarchive.UntarUncompressed
+)
+
+// This backend uses the overlay union filesystem for containers
+// with diff directories for each layer.
+
+// This version of the overlay driver requires at least kernel
+// 4.0.0 in order to support mounting multiple diff directories.
+
+// Each container/image has at least a "diff" directory and "link" file.
+// If there are diff layers below, there is also a "lower" file, as well
+// as "merged" and "work" directories. The "diff" directory
+// has the upper layer of the overlay and is used to capture any
+// changes to the layer. The "lower" file contains all the lower layer
+// mounts separated by ":" and ordered from uppermost to lowermost
+// layers. The overlay itself is mounted in the "merged" directory,
+// and the "work" dir is needed for overlay to work.
+
+// The "link" file for each layer contains a unique string for the layer.
+// Under the "l" directory at the root there will be a symbolic link
+// with that unique string pointing to the "diff" directory for the layer.
+// The symbolic links are used to reference lower layers in the "lower"
+// file and on mount. The links are used to shorten the total length
+// of a layer reference without requiring changes to the layer identifier
+// or root directory. Mounts are always done relative to root and
+// referencing the symbolic links in order to ensure the number of
+// lower directories can fit in a single page for making the mount
+// syscall. A hard upper limit of 128 lower layers is enforced to ensure
+// that mounts do not fail due to length.
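+//
+// To make the page-size arithmetic concrete (assuming the common 4KiB page):
+// each lower-layer reference in the mount data costs
+// idLength + len(linkDir) + 1 bytes ("l/" plus 26 random characters plus a
+// separator), i.e. 28 bytes, so 128 layers consume 28 * 128 = 3584 bytes,
+// which is exactly the 4096 - 512 budget that the idLength constant below
+// is sized against.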
+ +const ( + linkDir = "l" + lowerFile = "lower" + maxDepth = 128 + + // idLength represents the number of random characters + // which can be used to create the unique link identifer + // for every layer. If this value is too long then the + // page size limit for the mount command may be exceeded. + // The idLength should be selected such that following equation + // is true (512 is a buffer for label metadata). + // ((idLength + len(linkDir) + 1) * maxDepth) <= (pageSize - 512) + idLength = 26 +) + +type overlayOptions struct { + imageStores []string + quota quota.Quota + mountProgram string + ostreeRepo string + skipMountHome bool + mountOptions string +} + +// Driver contains information about the home directory and the list of active mounts that are created using this driver. +type Driver struct { + name string + home string + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap + ctr *graphdriver.RefCounter + quotaCtl *quota.Control + options overlayOptions + naiveDiff graphdriver.DiffDriver + supportsDType bool + usingMetacopy bool + locker *locker.Locker + convert map[string]bool +} + +var ( + backingFs = "" + projectQuotaSupported = false + + useNaiveDiffLock sync.Once + useNaiveDiffOnly bool +) + +func init() { + graphdriver.Register("overlay", Init) + graphdriver.Register("overlay2", Init) +} + +// Init returns the a native diff driver for overlay filesystem. +// If overlay filesystem is not supported on the host, a wrapped graphdriver.ErrNotSupported is returned as error. +// If an overlay filesystem is not supported over an existing filesystem then a wrapped graphdriver.ErrIncompatibleFS is returned. +func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { + opts, err := parseOptions(options) + if err != nil { + return nil, err + } + + fsMagic, err := graphdriver.GetFSMagic(home) + if err != nil { + return nil, err + } + if fsName, ok := graphdriver.FsNames[fsMagic]; ok { + backingFs = fsName + } + + // check if they are running over btrfs, aufs, zfs, overlay, or ecryptfs + if opts.mountProgram == "" { + switch fsMagic { + case graphdriver.FsMagicAufs, graphdriver.FsMagicZfs, graphdriver.FsMagicOverlay, graphdriver.FsMagicEcryptfs: + logrus.Errorf("'overlay' is not supported over %s", backingFs) + return nil, errors.Wrapf(graphdriver.ErrIncompatibleFS, "'overlay' is not supported over %s", backingFs) + } + } + + rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) + if err != nil { + return nil, err + } + + // Create the driver home dir + if err := idtools.MkdirAllAs(path.Join(home, linkDir), 0700, rootUID, rootGID); err != nil && !os.IsExist(err) { + return nil, err + } + + var usingMetacopy bool + var supportsDType bool + if opts.mountProgram != "" { + supportsDType = true + } else { + supportsDType, err = supportsOverlay(home, fsMagic, rootUID, rootGID) + if err != nil { + os.Remove(filepath.Join(home, linkDir)) + os.Remove(home) + patherr, ok := err.(*os.PathError) + if ok && patherr.Err == syscall.ENOSPC { + return nil, err + } + return nil, errors.Wrap(err, "kernel does not support overlay fs") + } + usingMetacopy, err = doesMetacopy(home, opts.mountOptions) + if err == nil { + if usingMetacopy { + logrus.Debugf("overlay test mount indicated that metacopy is being used") + } else { + logrus.Debugf("overlay test mount indicated that metacopy is not being used") + } + } else { + logrus.Warnf("overlay test mount did not indicate whether or not metacopy is being used: %v", err) + return nil, err + } + } + + if 
!opts.skipMountHome { + if err := mount.MakePrivate(home); err != nil { + return nil, err + } + } + + if opts.ostreeRepo != "" { + if err := ostree.CreateOSTreeRepository(opts.ostreeRepo, rootUID, rootGID); err != nil { + return nil, err + } + } + + d := &Driver{ + name: "overlay", + home: home, + uidMaps: uidMaps, + gidMaps: gidMaps, + ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicOverlay)), + supportsDType: supportsDType, + usingMetacopy: usingMetacopy, + locker: locker.New(), + options: *opts, + convert: make(map[string]bool), + } + + d.naiveDiff = graphdriver.NewNaiveDiffDriver(d, d) + + if backingFs == "xfs" { + // Try to enable project quota support over xfs. + if d.quotaCtl, err = quota.NewControl(home); err == nil { + projectQuotaSupported = true + } else if opts.quota.Size > 0 { + return nil, fmt.Errorf("Storage option overlay.size not supported. Filesystem does not support Project Quota: %v", err) + } + } else if opts.quota.Size > 0 { + // if xfs is not the backing fs then error out if the storage-opt overlay.size is used. + return nil, fmt.Errorf("Storage option overlay.size only supported for backingFS XFS. Found %v", backingFs) + } + + logrus.Debugf("backingFs=%s, projectQuotaSupported=%v, useNativeDiff=%v, usingMetacopy=%v", backingFs, projectQuotaSupported, !d.useNaiveDiff(), d.usingMetacopy) + + return d, nil +} + +func parseOptions(options []string) (*overlayOptions, error) { + o := &overlayOptions{} + for _, option := range options { + key, val, err := parsers.ParseKeyValueOpt(option) + if err != nil { + return nil, err + } + key = strings.ToLower(key) + switch key { + case ".override_kernel_check", "overlay.override_kernel_check", "overlay2.override_kernel_check": + logrus.Warnf("overlay: override_kernel_check option was specified, but is no longer necessary") + case ".mountopt", "overlay.mountopt", "overlay2.mountopt": + o.mountOptions = val + case ".size", "overlay.size", "overlay2.size": + logrus.Debugf("overlay: size=%s", val) + size, err := units.RAMInBytes(val) + if err != nil { + return nil, err + } + o.quota.Size = uint64(size) + case ".imagestore", "overlay.imagestore", "overlay2.imagestore": + logrus.Debugf("overlay: imagestore=%s", val) + // Additional read only image stores to use for lower paths + for _, store := range strings.Split(val, ",") { + store = filepath.Clean(store) + if !filepath.IsAbs(store) { + return nil, fmt.Errorf("overlay: image path %q is not absolute. 
Can not be relative", store) + } + st, err := os.Stat(store) + if err != nil { + return nil, fmt.Errorf("overlay: can't stat imageStore dir %s: %v", store, err) + } + if !st.IsDir() { + return nil, fmt.Errorf("overlay: image path %q must be a directory", store) + } + o.imageStores = append(o.imageStores, store) + } + case ".mount_program", "overlay.mount_program", "overlay2.mount_program": + logrus.Debugf("overlay: mount_program=%s", val) + _, err := os.Stat(val) + if err != nil { + return nil, fmt.Errorf("overlay: can't stat program %s: %v", val, err) + } + o.mountProgram = val + case "overlay2.ostree_repo", "overlay.ostree_repo", ".ostree_repo": + logrus.Debugf("overlay: ostree_repo=%s", val) + if !ostree.OstreeSupport() { + return nil, fmt.Errorf("overlay: ostree_repo specified but support for ostree is missing") + } + o.ostreeRepo = val + case "overlay2.skip_mount_home", "overlay.skip_mount_home", ".skip_mount_home": + logrus.Debugf("overlay: skip_mount_home=%s", val) + o.skipMountHome, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("overlay: Unknown option %s", key) + } + } + return o, nil +} + +func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGID int) (supportsDType bool, err error) { + // We can try to modprobe overlay first + + exec.Command("modprobe", "overlay").Run() + + layerDir, err := ioutil.TempDir(home, "compat") + if err != nil { + patherr, ok := err.(*os.PathError) + if ok && patherr.Err == syscall.ENOSPC { + return false, err + } + } + if err == nil { + // Check if reading the directory's contents populates the d_type field, which is required + // for proper operation of the overlay filesystem. + supportsDType, err = fsutils.SupportsDType(layerDir) + if err != nil { + return false, err + } + if !supportsDType { + return false, overlayutils.ErrDTypeNotSupported("overlay", backingFs) + } + + // Try a test mount in the specific location we're looking at using. 
+ mergedDir := filepath.Join(layerDir, "merged") + lower1Dir := filepath.Join(layerDir, "lower1") + lower2Dir := filepath.Join(layerDir, "lower2") + upperDir := filepath.Join(layerDir, "upper") + workDir := filepath.Join(layerDir, "work") + defer func() { + // Permitted to fail, since the various subdirectories + // can be empty or not even there, and the home might + // legitimately be not empty + _ = unix.Unmount(mergedDir, unix.MNT_DETACH) + _ = os.RemoveAll(layerDir) + _ = os.Remove(home) + }() + _ = idtools.MkdirAs(mergedDir, 0700, rootUID, rootGID) + _ = idtools.MkdirAs(lower1Dir, 0700, rootUID, rootGID) + _ = idtools.MkdirAs(lower2Dir, 0700, rootUID, rootGID) + _ = idtools.MkdirAs(upperDir, 0700, rootUID, rootGID) + _ = idtools.MkdirAs(workDir, 0700, rootUID, rootGID) + flags := fmt.Sprintf("lowerdir=%s:%s,upperdir=%s,workdir=%s", lower1Dir, lower2Dir, upperDir, workDir) + if len(flags) < unix.Getpagesize() { + err := mountFrom(filepath.Dir(home), "overlay", mergedDir, "overlay", 0, flags) + if err == nil { + logrus.Debugf("overlay test mount with multiple lowers succeeded") + return supportsDType, nil + } else { + logrus.Debugf("overlay test mount with multiple lowers failed %v", err) + } + } + flags = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lower1Dir, upperDir, workDir) + if len(flags) < unix.Getpagesize() { + err := mountFrom(filepath.Dir(home), "overlay", mergedDir, "overlay", 0, flags) + if err == nil { + logrus.Errorf("overlay test mount with multiple lowers failed, but succeeded with a single lower") + return supportsDType, errors.Wrap(graphdriver.ErrNotSupported, "kernel too old to provide multiple lowers feature for overlay") + } else { + logrus.Debugf("overlay test mount with a single lower failed %v", err) + } + } + logrus.Errorf("'overlay' is not supported over %s at %q", backingFs, home) + return supportsDType, errors.Wrapf(graphdriver.ErrIncompatibleFS, "'overlay' is not supported over %s at %q", backingFs, home) + } + + logrus.Error("'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.") + return supportsDType, errors.Wrap(graphdriver.ErrNotSupported, "'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.") +} + +func (d *Driver) useNaiveDiff() bool { + useNaiveDiffLock.Do(func() { + if d.options.mountProgram != "" { + useNaiveDiffOnly = true + return + } + if err := doesSupportNativeDiff(d.home, d.options.mountOptions); err != nil { + logrus.Warnf("Not using native diff for overlay, this may cause degraded performance for building images: %v", err) + useNaiveDiffOnly = true + } + }) + return useNaiveDiffOnly +} + +func (d *Driver) String() string { + return d.name +} + +// Status returns current driver information in a two dimensional string array. +// Output contains "Backing Filesystem" used in this implementation. +func (d *Driver) Status() [][2]string { + return [][2]string{ + {"Backing Filesystem", backingFs}, + {"Supports d_type", strconv.FormatBool(d.supportsDType)}, + {"Native Overlay Diff", strconv.FormatBool(!d.useNaiveDiff())}, + {"Using metacopy", strconv.FormatBool(d.usingMetacopy)}, + } +} + +// Metadata returns meta data about the overlay driver such as +// LowerDir, UpperDir, WorkDir and MergeDir used to store data. 
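+// For a layer with lower layers, the result looks roughly like this
+// (paths hypothetical):
+//
+//	UpperDir:  /var/lib/containers/storage/overlay/<id>/diff
+//	WorkDir:   /var/lib/containers/storage/overlay/<id>/work
+//	MergedDir: /var/lib/containers/storage/overlay/<id>/merged
+//	LowerDir:  <lower1>:<lower2>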
+func (d *Driver) Metadata(id string) (map[string]string, error) { + dir := d.dir(id) + if _, err := os.Stat(dir); err != nil { + return nil, err + } + + metadata := map[string]string{ + "WorkDir": path.Join(dir, "work"), + "MergedDir": path.Join(dir, "merged"), + "UpperDir": path.Join(dir, "diff"), + } + + lowerDirs, err := d.getLowerDirs(id) + if err != nil { + return nil, err + } + if len(lowerDirs) > 0 { + metadata["LowerDir"] = strings.Join(lowerDirs, ":") + } + + return metadata, nil +} + +// Cleanup any state created by overlay which should be cleaned when daemon +// is being shutdown. For now, we just have to unmount the bind mounted +// we had created. +func (d *Driver) Cleanup() error { + return mount.Unmount(d.home) +} + +// CreateFromTemplate creates a layer with the same contents and parent as another layer. +func (d *Driver) CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *graphdriver.CreateOpts, readWrite bool) error { + if readWrite { + return d.CreateReadWrite(id, template, opts) + } + return d.Create(id, template, opts) +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. +func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + if opts != nil && len(opts.StorageOpt) != 0 && !projectQuotaSupported { + return fmt.Errorf("--storage-opt is supported only for overlay over xfs with 'pquota' mount option") + } + + if opts == nil { + opts = &graphdriver.CreateOpts{ + StorageOpt: map[string]string{}, + } + } + + if _, ok := opts.StorageOpt["size"]; !ok { + if opts.StorageOpt == nil { + opts.StorageOpt = map[string]string{} + } + opts.StorageOpt["size"] = strconv.FormatUint(d.options.quota.Size, 10) + } + + return d.create(id, parent, opts) +} + +// Create is used to create the upper, lower, and merge directories required for overlay fs for a given id. +// The parent filesystem is used to configure these directories for the overlay. 
+func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) { + if opts != nil && len(opts.StorageOpt) != 0 { + if _, ok := opts.StorageOpt["size"]; ok { + return fmt.Errorf("--storage-opt size is only supported for ReadWrite Layers") + } + } + + if d.options.ostreeRepo != "" { + d.convert[id] = true + } + + return d.create(id, parent, opts) +} + +func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) { + dir := d.dir(id) + + uidMaps := d.uidMaps + gidMaps := d.gidMaps + + if opts != nil && opts.IDMappings != nil { + uidMaps = opts.IDMappings.UIDs() + gidMaps = opts.IDMappings.GIDs() + } + + rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) + if err != nil { + return err + } + // Make the link directory if it does not exist + if err := idtools.MkdirAllAs(path.Join(d.home, linkDir), 0700, rootUID, rootGID); err != nil && !os.IsExist(err) { + return err + } + if err := idtools.MkdirAllAs(path.Dir(dir), 0700, rootUID, rootGID); err != nil { + return err + } + if parent != "" { + st, err := system.Stat(d.dir(parent)) + if err != nil { + return err + } + rootUID = int(st.UID()) + rootGID = int(st.GID()) + } + if err := idtools.MkdirAs(dir, 0700, rootUID, rootGID); err != nil { + return err + } + + defer func() { + // Clean up on failure + if retErr != nil { + os.RemoveAll(dir) + } + }() + + if opts != nil && len(opts.StorageOpt) > 0 { + driver := &Driver{} + if err := d.parseStorageOpt(opts.StorageOpt, driver); err != nil { + return err + } + + if driver.options.quota.Size > 0 { + // Set container disk quota limit + if err := d.quotaCtl.SetQuota(dir, driver.options.quota); err != nil { + return err + } + } + } + + if err := idtools.MkdirAs(path.Join(dir, "diff"), 0755, rootUID, rootGID); err != nil { + return err + } + + lid := generateID(idLength) + if err := os.Symlink(path.Join("..", id, "diff"), path.Join(d.home, linkDir, lid)); err != nil { + return err + } + + // Write link id to link file + if err := ioutil.WriteFile(path.Join(dir, "link"), []byte(lid), 0644); err != nil { + return err + } + + if err := idtools.MkdirAs(path.Join(dir, "work"), 0700, rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(path.Join(dir, "merged"), 0700, rootUID, rootGID); err != nil { + return err + } + + // if no parent directory, create a dummy lower directory and skip writing a "lowers" file + if parent == "" { + return idtools.MkdirAs(path.Join(dir, "empty"), 0700, rootUID, rootGID) + } + + lower, err := d.getLower(parent) + if err != nil { + return err + } + if lower != "" { + if err := ioutil.WriteFile(path.Join(dir, lowerFile), []byte(lower), 0666); err != nil { + return err + } + } + + return nil +} + +// Parse overlay storage options +func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) error { + // Read size to set the disk project quota per container + for key, val := range storageOpt { + key := strings.ToLower(key) + switch key { + case "size": + size, err := units.RAMInBytes(val) + if err != nil { + return err + } + driver.options.quota.Size = uint64(size) + default: + return fmt.Errorf("Unknown option %s", key) + } + } + + return nil +} + +func (d *Driver) getLower(parent string) (string, error) { + parentDir := d.dir(parent) + + // Ensure parent exists + if _, err := os.Lstat(parentDir); err != nil { + return "", err + } + + // Read Parent link fileA + parentLink, err := ioutil.ReadFile(path.Join(parentDir, "link")) + if err != nil { + return "", err + } + lowers := 
[]string{path.Join(linkDir, string(parentLink))} + + parentLower, err := ioutil.ReadFile(path.Join(parentDir, lowerFile)) + if err == nil { + parentLowers := strings.Split(string(parentLower), ":") + lowers = append(lowers, parentLowers...) + } + if len(lowers) > maxDepth { + return "", errors.New("max depth exceeded") + } + return strings.Join(lowers, ":"), nil +} + +func (d *Driver) dir(id string) string { + newpath := path.Join(d.home, id) + if _, err := os.Stat(newpath); err != nil { + for _, p := range d.AdditionalImageStores() { + l := path.Join(p, d.name, id) + _, err = os.Stat(l) + if err == nil { + return l + } + } + } + return newpath +} + +func (d *Driver) getLowerDirs(id string) ([]string, error) { + var lowersArray []string + lowers, err := ioutil.ReadFile(path.Join(d.dir(id), lowerFile)) + if err == nil { + for _, s := range strings.Split(string(lowers), ":") { + lower := d.dir(s) + lp, err := os.Readlink(lower) + if err != nil { + return nil, err + } + lowersArray = append(lowersArray, path.Clean(d.dir(path.Join("link", lp)))) + } + } else if !os.IsNotExist(err) { + return nil, err + } + return lowersArray, nil +} + +func (d *Driver) optsAppendMappings(opts string, uidMaps, gidMaps []idtools.IDMap) string { + if uidMaps == nil { + uidMaps = d.uidMaps + } + if gidMaps == nil { + gidMaps = d.gidMaps + } + if uidMaps != nil { + var uids, gids bytes.Buffer + for _, i := range uidMaps { + if uids.Len() > 0 { + uids.WriteString(":") + } + uids.WriteString(fmt.Sprintf("%d:%d:%d", i.ContainerID, i.HostID, i.Size)) + } + for _, i := range gidMaps { + if gids.Len() > 0 { + gids.WriteString(":") + } + gids.WriteString(fmt.Sprintf("%d:%d:%d", i.ContainerID, i.HostID, i.Size)) + } + return fmt.Sprintf("%s,uidmapping=%s,gidmapping=%s", opts, uids.String(), gids.String()) + } + return opts +} + +// Remove cleans the directories that are created for this id. +func (d *Driver) Remove(id string) error { + d.locker.Lock(id) + defer d.locker.Unlock(id) + + // Ignore errors, we don't want to fail if the ostree branch doesn't exist, + if d.options.ostreeRepo != "" { + ostree.DeleteOSTree(d.options.ostreeRepo, id) + } + + dir := d.dir(id) + lid, err := ioutil.ReadFile(path.Join(dir, "link")) + if err == nil { + if err := os.RemoveAll(path.Join(d.home, linkDir, string(lid))); err != nil { + logrus.Debugf("Failed to remove link: %v", err) + } + } + + if err := system.EnsureRemoveAll(dir); err != nil && !os.IsNotExist(err) { + return err + } + return nil +} + +// recreateSymlinks goes through the driver's home directory and checks if the diff directory +// under each layer has a symlink created for it under the linkDir. 
If the symlink does not +// exist, it creates them +func (d *Driver) recreateSymlinks() error { + // List all the directories under the home directory + dirs, err := ioutil.ReadDir(d.home) + if err != nil { + return fmt.Errorf("error reading driver home directory %q: %v", d.home, err) + } + // This makes the link directory if it doesn't exist + rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + return err + } + if err := idtools.MkdirAllAs(path.Join(d.home, linkDir), 0700, rootUID, rootGID); err != nil && !os.IsExist(err) { + return err + } + for _, dir := range dirs { + // Skip over the linkDir and anything that is not a directory + if dir.Name() == linkDir || !dir.Mode().IsDir() { + continue + } + // Read the "link" file under each layer to get the name of the symlink + data, err := ioutil.ReadFile(path.Join(d.dir(dir.Name()), "link")) + if err != nil { + return fmt.Errorf("error reading name of symlink for %q: %v", dir, err) + } + linkPath := path.Join(d.home, linkDir, strings.Trim(string(data), "\n")) + // Check if the symlink exists, and if it doesn't create it again with the name we + // got from the "link" file + _, err = os.Stat(linkPath) + if err != nil && os.IsNotExist(err) { + if err := os.Symlink(path.Join("..", dir.Name(), "diff"), linkPath); err != nil { + return err + } + } else if err != nil { + return fmt.Errorf("error trying to stat %q: %v", linkPath, err) + } + } + return nil +} + +// Get creates and mounts the required file system for the given id and returns the mount path. +func (d *Driver) Get(id string, options graphdriver.MountOpts) (_ string, retErr error) { + return d.get(id, false, options) +} + +func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountOpts) (_ string, retErr error) { + d.locker.Lock(id) + defer d.locker.Unlock(id) + dir := d.dir(id) + if _, err := os.Stat(dir); err != nil { + return "", err + } + + diffDir := path.Join(dir, "diff") + lowers, err := ioutil.ReadFile(path.Join(dir, lowerFile)) + if err != nil && !os.IsNotExist(err) { + return "", err + } + + // absLowers is the list of lowers as absolute paths, which works well with additional stores. + absLowers := []string{} + // relLowers is the list of lowers as paths relative to the driver's home directory. + relLowers := []string{} + + // Check if $link/../diff{1-*} exist. If they do, add them, in order, as the front of the lowers + // lists that we're building. "diff" itself is the upper, so it won't be in the lists. + link, err := ioutil.ReadFile(path.Join(dir, "link")) + if err != nil { + return "", err + } + diffN := 1 + _, err = os.Stat(filepath.Join(dir, nameWithSuffix("diff", diffN))) + for err == nil { + absLowers = append(absLowers, filepath.Join(dir, nameWithSuffix("diff", diffN))) + relLowers = append(relLowers, dumbJoin(string(link), "..", nameWithSuffix("diff", diffN))) + diffN++ + _, err = os.Stat(filepath.Join(dir, nameWithSuffix("diff", diffN))) + } + + // For each lower, resolve its path, and append it and any additional diffN + // directories to the lowers list. 
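The loop that follows resolves each entry of the stored lowers string into an absolute path, falling back to additional image stores and to recreateSymlinks(). Conceptually the stored form is just colon-separated short link names; a minimal sketch, with invented home and link names, of how such a list becomes the lowerdir half of the mount options:

package main

import (
	"fmt"
	"path"
	"strings"
)

func main() {
	home := "/tmp/overlay-home"       // hypothetical driver home
	stored := "l/AAAAAAAA:l/BBBBBBBB" // hypothetical contents of a layer's "lower" file
	var absLowers []string
	for _, l := range strings.Split(stored, ":") {
		// each short name is a symlink under home/l pointing at <layer>/diff
		absLowers = append(absLowers, path.Join(home, l))
	}
	fmt.Println("lowerdir=" + strings.Join(absLowers, ":"))
}

Keeping the stored form short is what makes the page-size fallback further down workable.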
+ for _, l := range strings.Split(string(lowers), ":") { + if l == "" { + continue + } + lower := "" + newpath := path.Join(d.home, l) + if _, err := os.Stat(newpath); err != nil { + for _, p := range d.AdditionalImageStores() { + lower = path.Join(p, d.name, l) + if _, err2 := os.Stat(lower); err2 == nil { + break + } + lower = "" + } + // if it is a "not found" error, that means the symlinks were lost in a sudden reboot + // so call the recreateSymlinks function to go through all the layer dirs and recreate + // the symlinks with the name from their respective "link" files + if lower == "" && os.IsNotExist(err) { + logrus.Warnf("Can't stat lower layer %q because it does not exist. Going through storage to recreate the missing symlinks.", newpath) + if err := d.recreateSymlinks(); err != nil { + return "", fmt.Errorf("error recreating the missing symlinks: %v", err) + } + lower = newpath + } else if lower == "" { + return "", fmt.Errorf("Can't stat lower layer %q: %v", newpath, err) + } + } else { + lower = newpath + } + absLowers = append(absLowers, lower) + relLowers = append(relLowers, l) + diffN = 1 + _, err = os.Stat(dumbJoin(lower, "..", nameWithSuffix("diff", diffN))) + for err == nil { + absLowers = append(absLowers, dumbJoin(lower, "..", nameWithSuffix("diff", diffN))) + relLowers = append(relLowers, dumbJoin(l, "..", nameWithSuffix("diff", diffN))) + diffN++ + _, err = os.Stat(dumbJoin(lower, "..", nameWithSuffix("diff", diffN))) + } + } + + // If the lowers list is still empty, use an empty lower so that we can still force an + // SELinux context for the mount. + if len(absLowers) == 0 { + absLowers = append(absLowers, path.Join(dir, "empty")) + relLowers = append(relLowers, path.Join(id, "empty")) + } + + mergedDir := path.Join(dir, "merged") + if count := d.ctr.Increment(mergedDir); count > 1 { + return mergedDir, nil + } + defer func() { + if retErr != nil { + if c := d.ctr.Decrement(mergedDir); c <= 0 { + if mntErr := unix.Unmount(mergedDir, 0); mntErr != nil { + logrus.Errorf("error unmounting %v: %v", mergedDir, mntErr) + } + } + } + }() + + workDir := path.Join(dir, "work") + opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(absLowers, ":"), diffDir, workDir) + if len(options.Options) > 0 { + opts = fmt.Sprintf("%s,%s", strings.Join(options.Options, ","), opts) + } else if d.options.mountOptions != "" { + opts = fmt.Sprintf("%s,%s", d.options.mountOptions, opts) + } + mountData := label.FormatMountLabel(opts, options.MountLabel) + mountFunc := unix.Mount + mountTarget := mergedDir + + pageSize := unix.Getpagesize() + + // Use relative paths and mountFrom when the mount data has exceeded + // the page size. The mount syscall fails if the mount data cannot + // fit within a page and relative links make the mount data much + // smaller at the expense of requiring a fork exec to chroot. 
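A sketch of the constraint that comment describes: mount(2) rejects option data larger than one page, so when absolute lowers overflow it, the code below rebuilds the string from short relative paths and mounts from inside the driver home. The lengths here are invented to force the overflow:

package main

import (
	"fmt"
	"os"
	"strings"
)

func main() {
	pageSize := os.Getpagesize() // commonly 4096 on Linux
	abs := strings.Repeat("/var/lib/containers/storage/overlay/l/XXXXXXXXXXXXXXXXXXXXXXXXXX:", 80)
	mountData := "lowerdir=" + strings.TrimSuffix(abs, ":")
	fmt.Printf("option string: %d bytes, page size: %d\n", len(mountData), pageSize)
	if len(mountData) > pageSize {
		fmt.Println("too large: switch to relative lowers and mountFrom(d.home, ...)")
	}
}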
+	if d.options.mountProgram != "" {
+		mountFunc = func(source string, target string, mType string, flags uintptr, label string) error {
+			if !disableShifting {
+				label = d.optsAppendMappings(label, options.UidMaps, options.GidMaps)
+			}
+
+			mountProgram := exec.Command(d.options.mountProgram, "-o", label, target)
+			mountProgram.Dir = d.home
+			var b bytes.Buffer
+			mountProgram.Stderr = &b
+			err := mountProgram.Run()
+			if err != nil {
+				output := b.String()
+				if output == "" {
+					output = "<stderr empty>"
+				}
+				return errors.Wrapf(err, "using mount program %s: %s", d.options.mountProgram, output)
+			}
+			return nil
+		}
+	} else if len(mountData) > pageSize {
+		//FIXME: We need to figure out how to get this to work with additional stores
+		opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(relLowers, ":"), path.Join(id, "diff"), path.Join(id, "work"))
+		mountData = label.FormatMountLabel(opts, options.MountLabel)
+		if len(mountData) > pageSize {
+			return "", fmt.Errorf("cannot mount layer, mount label too large %d", len(mountData))
+		}
+		mountFunc = func(source string, target string, mType string, flags uintptr, label string) error {
+			return mountFrom(d.home, source, target, mType, flags, label)
+		}
+		mountTarget = path.Join(id, "merged")
+	}
+	flags, data := mount.ParseOptions(mountData)
+	logrus.Debugf("overlay: mount_data=%s", mountData)
+	if err := mountFunc("overlay", mountTarget, "overlay", uintptr(flags), data); err != nil {
+		return "", fmt.Errorf("error creating overlay mount to %s: %v", mountTarget, err)
+	}
+
+	// chown "workdir/work" to the remapped root UID/GID. Overlay fs inside a
+	// user namespace requires this to move a directory from lower to upper.
+	rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
+	if err != nil {
+		return "", err
+	}
+
+	if err := os.Chown(path.Join(workDir, "work"), rootUID, rootGID); err != nil {
+		return "", err
+	}
+
+	return mergedDir, nil
+}
+
+// Put unmounts the mount path created for the given id.
+func (d *Driver) Put(id string) error {
+	d.locker.Lock(id)
+	defer d.locker.Unlock(id)
+	dir := d.dir(id)
+	if _, err := os.Stat(dir); err != nil {
+		return err
+	}
+	mountpoint := path.Join(d.dir(id), "merged")
+	if count := d.ctr.Decrement(mountpoint); count > 0 {
+		return nil
+	}
+	if _, err := ioutil.ReadFile(path.Join(dir, lowerFile)); err != nil && !os.IsNotExist(err) {
+		return err
+	}
+	if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil {
+		logrus.Debugf("Failed to unmount %s overlay: %s - %v", id, mountpoint, err)
+	}
+	return nil
+}
+
+// Exists checks to see if the layer directory exists for the given id.
+func (d *Driver) Exists(id string) bool {
+	_, err := os.Stat(d.dir(id))
+	return err == nil
+}
+
+// isParent returns whether the passed-in parent is the direct parent of the passed-in layer
+func (d *Driver) isParent(id, parent string) bool {
+	lowers, err := d.getLowerDirs(id)
+	if err != nil {
+		return false
+	}
+	if parent == "" && len(lowers) > 0 {
+		return false
+	}
+
+	parentDir := d.dir(parent)
+	var ld string
+	if len(lowers) > 0 {
+		ld = filepath.Dir(lowers[0])
+	}
+	if ld == "" && parent == "" {
+		return true
+	}
+	return ld == parentDir
+}
+
+func (d *Driver) getWhiteoutFormat() archive.WhiteoutFormat {
+	whiteoutFormat := archive.OverlayWhiteoutFormat
+	if d.options.mountProgram != "" {
+		// If we are using a mount program, we are most likely running
+		// as an unprivileged user that cannot use mknod, so fallback to the
+		// AUFS whiteout format.
+ whiteoutFormat = archive.AUFSWhiteoutFormat + } + return whiteoutFormat +} + +// ApplyDiff applies the new layer into a root +func (d *Driver) ApplyDiff(id string, idMappings *idtools.IDMappings, parent string, mountLabel string, diff io.Reader) (size int64, err error) { + if !d.isParent(id, parent) { + return d.naiveDiff.ApplyDiff(id, idMappings, parent, mountLabel, diff) + } + + if idMappings == nil { + idMappings = &idtools.IDMappings{} + } + + applyDir := d.getDiffPath(id) + + logrus.Debugf("Applying tar in %s", applyDir) + // Overlay doesn't need the parent id to apply the diff + if err := untar(diff, applyDir, &archive.TarOptions{ + UIDMaps: idMappings.UIDs(), + GIDMaps: idMappings.GIDs(), + WhiteoutFormat: d.getWhiteoutFormat(), + InUserNS: rsystem.RunningInUserNS(), + }); err != nil { + return 0, err + } + + _, convert := d.convert[id] + if convert { + if err := ostree.ConvertToOSTree(d.options.ostreeRepo, applyDir, id); err != nil { + return 0, err + } + } + + return directory.Size(applyDir) +} + +func (d *Driver) getDiffPath(id string) string { + dir := d.dir(id) + + return path.Join(dir, "diff") +} + +// DiffSize calculates the changes between the specified id +// and its parent and returns the size in bytes of the changes +// relative to its base filesystem directory. +func (d *Driver) DiffSize(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (size int64, err error) { + if d.useNaiveDiff() || !d.isParent(id, parent) { + return d.naiveDiff.DiffSize(id, idMappings, parent, parentMappings, mountLabel) + } + return directory.Size(d.getDiffPath(id)) +} + +// Diff produces an archive of the changes between the specified +// layer and its parent layer which may be "". +func (d *Driver) Diff(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (io.ReadCloser, error) { + if d.useNaiveDiff() || !d.isParent(id, parent) { + return d.naiveDiff.Diff(id, idMappings, parent, parentMappings, mountLabel) + } + + if idMappings == nil { + idMappings = &idtools.IDMappings{} + } + + lowerDirs, err := d.getLowerDirs(id) + if err != nil { + return nil, err + } + + diffPath := d.getDiffPath(id) + logrus.Debugf("Tar with options on %s", diffPath) + return archive.TarWithOptions(diffPath, &archive.TarOptions{ + Compression: archive.Uncompressed, + UIDMaps: idMappings.UIDs(), + GIDMaps: idMappings.GIDs(), + WhiteoutFormat: d.getWhiteoutFormat(), + WhiteoutData: lowerDirs, + }) +} + +// Changes produces a list of changes between the specified layer +// and its parent layer. If parent is "", then all changes will be ADD changes. +func (d *Driver) Changes(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) ([]archive.Change, error) { + if d.useNaiveDiff() || !d.isParent(id, parent) { + return d.naiveDiff.Changes(id, idMappings, parent, parentMappings, mountLabel) + } + // Overlay doesn't have snapshots, so we need to get changes from all parent + // layers. + diffPath := d.getDiffPath(id) + layers, err := d.getLowerDirs(id) + if err != nil { + return nil, err + } + + return archive.OverlayChanges(layers, diffPath) +} + +// AdditionalImageStores returns additional image stores supported by the driver +func (d *Driver) AdditionalImageStores() []string { + return d.options.imageStores +} + +// UpdateLayerIDMap updates ID mappings in a from matching the ones specified +// by toContainer to those specified by toHost. 
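UpdateLayerIDMap below re-chowns a mounted layer, then rotates the old upper directory aside as diff1, diff2, and so on before recreating a fresh diff. A standalone sketch of just the rotation, with an invented path (nameWithSuffix here mirrors the helper defined at the end of this file):

package main

import (
	"fmt"
	"os"
)

// nameWithSuffix yields "diff" for 0, then "diff1", "diff2", ...
func nameWithSuffix(name string, n int) string {
	if n == 0 {
		return name
	}
	return fmt.Sprintf("%s%d", name, n)
}

func main() {
	diffDir := "/tmp/example-layer/diff" // hypothetical
	i := 0
	for {
		if _, err := os.Stat(nameWithSuffix(diffDir, i)); err != nil {
			break // found the first unused suffix
		}
		i++
	}
	for ; i > 0; i-- { // shift everything up by one, highest suffix first
		if err := os.Rename(nameWithSuffix(diffDir, i-1), nameWithSuffix(diffDir, i)); err != nil {
			fmt.Fprintln(os.Stderr, err)
			return
		}
	}
}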
+func (d *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) error { + var err error + dir := d.dir(id) + diffDir := filepath.Join(dir, "diff") + + rootUID, rootGID := 0, 0 + if toHost != nil { + rootUID, rootGID, err = idtools.GetRootUIDGID(toHost.UIDs(), toHost.GIDs()) + if err != nil { + return err + } + } + + // Mount the new layer and handle ownership changes and possible copy_ups in it. + options := graphdriver.MountOpts{ + MountLabel: mountLabel, + Options: strings.Split(d.options.mountOptions, ","), + } + layerFs, err := d.get(id, true, options) + if err != nil { + return err + } + err = graphdriver.ChownPathByMaps(layerFs, toContainer, toHost) + if err != nil { + if err2 := d.Put(id); err2 != nil { + logrus.Errorf("%v; error unmounting %v: %v", err, id, err2) + } + return err + } + if err = d.Put(id); err != nil { + return err + } + + // Rotate the diff directories. + i := 0 + _, err = os.Stat(nameWithSuffix(diffDir, i)) + for err == nil { + i++ + _, err = os.Stat(nameWithSuffix(diffDir, i)) + } + for i > 0 { + err = os.Rename(nameWithSuffix(diffDir, i-1), nameWithSuffix(diffDir, i)) + if err != nil { + return err + } + i-- + } + + // Re-create the directory that we're going to use as the upper layer. + if err := idtools.MkdirAs(diffDir, 0755, rootUID, rootGID); err != nil { + return err + } + return nil +} + +// SupportsShifting tells whether the driver support shifting of the UIDs/GIDs in an userNS +func (d *Driver) SupportsShifting() bool { + if os.Getenv("_TEST_FORCE_SUPPORT_SHIFTING") == "yes-please" { + return true + } + return d.options.mountProgram != "" +} + +// dumbJoin is more or less a dumber version of filepath.Join, but one which +// won't Clean() the path, allowing us to append ".." as a component and trust +// pathname resolution to do some non-obvious work. +func dumbJoin(names ...string) string { + if len(names) == 0 { + return string(os.PathSeparator) + } + return strings.Join(names, string(os.PathSeparator)) +} + +func nameWithSuffix(name string, number int) string { + if number == 0 { + return name + } + return fmt.Sprintf("%s%d", name, number) +} diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay_unsupported.go b/vendor/github.com/containers/storage/drivers/overlay/overlay_unsupported.go new file mode 100644 index 0000000..3dbb4de --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/overlay/overlay_unsupported.go @@ -0,0 +1,3 @@ +// +build !linux + +package overlay diff --git a/vendor/github.com/containers/storage/drivers/overlay/randomid.go b/vendor/github.com/containers/storage/drivers/overlay/randomid.go new file mode 100644 index 0000000..fc565ef --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/overlay/randomid.go @@ -0,0 +1,81 @@ +// +build linux + +package overlay + +import ( + "crypto/rand" + "encoding/base32" + "fmt" + "io" + "os" + "syscall" + "time" + + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +// generateID creates a new random string identifier with the given length +func generateID(l int) string { + const ( + // ensures we backoff for less than 450ms total. 
Use the following to + // select new value, in units of 10ms: + // n*(n+1)/2 = d -> n^2 + n - 2d -> n = (sqrt(8d + 1) - 1)/2 + maxretries = 9 + backoff = time.Millisecond * 10 + ) + + var ( + totalBackoff time.Duration + count int + retries int + size = (l*5 + 7) / 8 + u = make([]byte, size) + ) + // TODO: Include time component, counter component, random component + + for { + // This should never block but the read may fail. Because of this, + // we just try to read the random number generator until we get + // something. This is a very rare condition but may happen. + b := time.Duration(retries) * backoff + time.Sleep(b) + totalBackoff += b + + n, err := io.ReadFull(rand.Reader, u[count:]) + if err != nil { + if retryOnError(err) && retries < maxretries { + count += n + retries++ + logrus.Errorf("error generating version 4 uuid, retrying: %v", err) + continue + } + + // Any other errors represent a system problem. What did someone + // do to /dev/urandom? + panic(fmt.Errorf("error reading random number generator, retried for %v: %v", totalBackoff.String(), err)) + } + + break + } + + s := base32.StdEncoding.EncodeToString(u) + + return s[:l] +} + +// retryOnError tries to detect whether or not retrying would be fruitful. +func retryOnError(err error) bool { + switch err := err.(type) { + case *os.PathError: + return retryOnError(err.Err) // unpack the target error + case syscall.Errno: + if err == unix.EPERM { + // EPERM represents an entropy pool exhaustion, a condition under + // which we backoff and retry. + return true + } + } + + return false +} diff --git a/vendor/github.com/containers/storage/drivers/overlayutils/overlayutils.go b/vendor/github.com/containers/storage/drivers/overlayutils/overlayutils.go new file mode 100644 index 0000000..49aaad0 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/overlayutils/overlayutils.go @@ -0,0 +1,20 @@ +// +build linux + +package overlayutils + +import ( + "fmt" + + "github.com/containers/storage/drivers" + "github.com/pkg/errors" +) + +// ErrDTypeNotSupported denotes that the backing filesystem doesn't support d_type. +func ErrDTypeNotSupported(driver, backingFs string) error { + msg := fmt.Sprintf("%s: the backing %s filesystem is formatted without d_type support, which leads to incorrect behavior.", driver, backingFs) + if backingFs == "xfs" { + msg += " Reformat the filesystem with ftype=1 to enable d_type support." + } + msg += " Running without d_type is not supported." + return errors.Wrap(graphdriver.ErrNotSupported, msg) +} diff --git a/vendor/github.com/containers/storage/drivers/quota/projectquota.go b/vendor/github.com/containers/storage/drivers/quota/projectquota.go new file mode 100644 index 0000000..93e7443 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/quota/projectquota.go @@ -0,0 +1,337 @@ +// +build linux + +// +// projectquota.go - implements XFS project quota controls +// for setting quota limits on a newly created directory. +// It currently supports the legacy XFS specific ioctls. 
+//
+// TODO: use generic quota control ioctl FS_IOC_FS{GET,SET}XATTR
+// for both xfs/ext4 for kernel version >= v4.5
+//
+
+package quota
+
+/*
+#include <stdlib.h>
+#include <dirent.h>
+#include <linux/fs.h>
+#include <linux/quota.h>
+#include <linux/dqblk_xfs.h>
+
+#ifndef FS_XFLAG_PROJINHERIT
+struct fsxattr {
+	__u32		fsx_xflags;
+	__u32		fsx_extsize;
+	__u32		fsx_nextents;
+	__u32		fsx_projid;
+	unsigned char	fsx_pad[12];
+};
+#define FS_XFLAG_PROJINHERIT	0x00000200
+#endif
+#ifndef FS_IOC_FSGETXATTR
+#define FS_IOC_FSGETXATTR	_IOR ('X', 31, struct fsxattr)
+#endif
+#ifndef FS_IOC_FSSETXATTR
+#define FS_IOC_FSSETXATTR	_IOW ('X', 32, struct fsxattr)
+#endif
+
+#ifndef PRJQUOTA
+#define PRJQUOTA	2
+#endif
+#ifndef XFS_PROJ_QUOTA
+#define XFS_PROJ_QUOTA	2
+#endif
+#ifndef Q_XSETPQLIM
+#define Q_XSETPQLIM QCMD(Q_XSETQLIM, PRJQUOTA)
+#endif
+#ifndef Q_XGETPQUOTA
+#define Q_XGETPQUOTA QCMD(Q_XGETQUOTA, PRJQUOTA)
+#endif
+*/
+import "C"
+import (
+	"fmt"
+	"io/ioutil"
+	"path"
+	"path/filepath"
+	"unsafe"
+
+	"github.com/sirupsen/logrus"
+	"golang.org/x/sys/unix"
+)
+
+// Quota limit params - currently we only control blocks hard limit
+type Quota struct {
+	Size uint64
+}
+
+// Control - Context to be used by storage driver (e.g. overlay)
+// who wants to apply project quotas to container dirs
+type Control struct {
+	backingFsBlockDev string
+	nextProjectID     uint32
+	quotas            map[string]uint32
+}
+
+// NewControl - initialize project quota support.
+// Test to make sure that quota can be set on a test dir and find
+// the first project id to be used for the next container create.
+//
+// Returns nil (and error) if project quota is not supported.
+//
+// First get the project id of the home directory.
+// This test will fail if the backing fs is not xfs.
+//
+// xfs_quota tool can be used to assign a project id to the driver home directory, e.g.:
+//	echo 999:/var/lib/containers/storage/overlay >> /etc/projects
+//	echo storage:999 >> /etc/projid
+//	xfs_quota -x -c 'project -s storage' /<xfs mount point>
+//
+// In that case, the home directory project id will be used as a "start offset"
+// and all containers will be assigned larger project ids (e.g. >= 1000).
+// This is a way to prevent xfs_quota management from conflicting with containers/storage.
+//
+// Then try to create a test directory with the next project id and set a quota
+// on it. If that works, continue to scan existing containers to map allocated
+// project ids.
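A hedged usage sketch of this API as the overlay driver wires it up: the home must sit on XFS mounted with the pquota option and the process needs root; both the paths and the 1 GiB size are invented:

package main

import (
	"log"

	"github.com/containers/storage/drivers/quota"
)

func main() {
	home := "/var/lib/containers/storage/overlay" // must be XFS mounted with pquota
	ctl, err := quota.NewControl(home)
	if err != nil {
		log.Fatalf("project quotas not supported on %s: %v", home, err)
	}
	// Hard block limit of 1 GiB for one layer directory (id invented).
	if err := ctl.SetQuota(home+"/exampleid", quota.Quota{Size: 1 << 30}); err != nil {
		log.Fatal(err)
	}
	var q quota.Quota
	if err := ctl.GetQuota(home+"/exampleid", &q); err != nil {
		log.Fatal(err)
	}
	log.Printf("limit: %d bytes", q.Size)
}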
+// +func NewControl(basePath string) (*Control, error) { + // + // Get project id of parent dir as minimal id to be used by driver + // + minProjectID, err := getProjectID(basePath) + if err != nil { + return nil, err + } + minProjectID++ + + // + // create backing filesystem device node + // + backingFsBlockDev, err := makeBackingFsDev(basePath) + if err != nil { + return nil, err + } + + // + // Test if filesystem supports project quotas by trying to set + // a quota on the first available project id + // + quota := Quota{ + Size: 0, + } + if err := setProjectQuota(backingFsBlockDev, minProjectID, quota); err != nil { + return nil, err + } + + q := Control{ + backingFsBlockDev: backingFsBlockDev, + nextProjectID: minProjectID + 1, + quotas: make(map[string]uint32), + } + + // + // get first project id to be used for next container + // + err = q.findNextProjectID(basePath) + if err != nil { + return nil, err + } + + logrus.Debugf("NewControl(%s): nextProjectID = %d", basePath, q.nextProjectID) + return &q, nil +} + +// SetQuota - assign a unique project id to directory and set the quota limits +// for that project id +func (q *Control) SetQuota(targetPath string, quota Quota) error { + + projectID, ok := q.quotas[targetPath] + if !ok { + projectID = q.nextProjectID + + // + // assign project id to new container directory + // + err := setProjectID(targetPath, projectID) + if err != nil { + return err + } + + q.quotas[targetPath] = projectID + q.nextProjectID++ + } + + // + // set the quota limit for the container's project id + // + logrus.Debugf("SetQuota(%s, %d): projectID=%d", targetPath, quota.Size, projectID) + return setProjectQuota(q.backingFsBlockDev, projectID, quota) +} + +// setProjectQuota - set the quota for project id on xfs block device +func setProjectQuota(backingFsBlockDev string, projectID uint32, quota Quota) error { + var d C.fs_disk_quota_t + d.d_version = C.FS_DQUOT_VERSION + d.d_id = C.__u32(projectID) + d.d_flags = C.XFS_PROJ_QUOTA + + d.d_fieldmask = C.FS_DQ_BHARD | C.FS_DQ_BSOFT + d.d_blk_hardlimit = C.__u64(quota.Size / 512) + d.d_blk_softlimit = d.d_blk_hardlimit + + var cs = C.CString(backingFsBlockDev) + defer C.free(unsafe.Pointer(cs)) + + _, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_XSETPQLIM, + uintptr(unsafe.Pointer(cs)), uintptr(d.d_id), + uintptr(unsafe.Pointer(&d)), 0, 0) + if errno != 0 { + return fmt.Errorf("Failed to set quota limit for projid %d on %s: %v", + projectID, backingFsBlockDev, errno.Error()) + } + + return nil +} + +// GetQuota - get the quota limits of a directory that was configured with SetQuota +func (q *Control) GetQuota(targetPath string, quota *Quota) error { + + projectID, ok := q.quotas[targetPath] + if !ok { + return fmt.Errorf("quota not found for path : %s", targetPath) + } + + // + // get the quota limit for the container's project id + // + var d C.fs_disk_quota_t + + var cs = C.CString(q.backingFsBlockDev) + defer C.free(unsafe.Pointer(cs)) + + _, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_XGETPQUOTA, + uintptr(unsafe.Pointer(cs)), uintptr(C.__u32(projectID)), + uintptr(unsafe.Pointer(&d)), 0, 0) + if errno != 0 { + return fmt.Errorf("Failed to get quota limit for projid %d on %s: %v", + projectID, q.backingFsBlockDev, errno.Error()) + } + quota.Size = uint64(d.d_blk_hardlimit) * 512 + + return nil +} + +// getProjectID - get the project id of path on xfs +func getProjectID(targetPath string) (uint32, error) { + dir, err := openDir(targetPath) + if err != nil { + return 0, err + } + defer closeDir(dir) + + 
var fsx C.struct_fsxattr + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR, + uintptr(unsafe.Pointer(&fsx))) + if errno != 0 { + return 0, fmt.Errorf("Failed to get projid for %s: %v", targetPath, errno.Error()) + } + + return uint32(fsx.fsx_projid), nil +} + +// setProjectID - set the project id of path on xfs +func setProjectID(targetPath string, projectID uint32) error { + dir, err := openDir(targetPath) + if err != nil { + return err + } + defer closeDir(dir) + + var fsx C.struct_fsxattr + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR, + uintptr(unsafe.Pointer(&fsx))) + if errno != 0 { + return fmt.Errorf("Failed to get projid for %s: %v", targetPath, errno.Error()) + } + fsx.fsx_projid = C.__u32(projectID) + fsx.fsx_xflags |= C.FS_XFLAG_PROJINHERIT + _, _, errno = unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSSETXATTR, + uintptr(unsafe.Pointer(&fsx))) + if errno != 0 { + return fmt.Errorf("Failed to set projid for %s: %v", targetPath, errno.Error()) + } + + return nil +} + +// findNextProjectID - find the next project id to be used for containers +// by scanning driver home directory to find used project ids +func (q *Control) findNextProjectID(home string) error { + files, err := ioutil.ReadDir(home) + if err != nil { + return fmt.Errorf("read directory failed : %s", home) + } + for _, file := range files { + if !file.IsDir() { + continue + } + path := filepath.Join(home, file.Name()) + projid, err := getProjectID(path) + if err != nil { + return err + } + if projid > 0 { + q.quotas[path] = projid + } + if q.nextProjectID <= projid { + q.nextProjectID = projid + 1 + } + } + + return nil +} + +func free(p *C.char) { + C.free(unsafe.Pointer(p)) +} + +func openDir(path string) (*C.DIR, error) { + Cpath := C.CString(path) + defer free(Cpath) + + dir := C.opendir(Cpath) + if dir == nil { + return nil, fmt.Errorf("Can't open dir") + } + return dir, nil +} + +func closeDir(dir *C.DIR) { + if dir != nil { + C.closedir(dir) + } +} + +func getDirFd(dir *C.DIR) uintptr { + return uintptr(C.dirfd(dir)) +} + +// Get the backing block device of the driver home directory +// and create a block device node under the home directory +// to be used by quotactl commands +func makeBackingFsDev(home string) (string, error) { + var stat unix.Stat_t + if err := unix.Stat(home, &stat); err != nil { + return "", err + } + + backingFsBlockDev := path.Join(home, "backingFsBlockDev") + // Re-create just in case someone copied the home directory over to a new device + unix.Unlink(backingFsBlockDev) + if err := unix.Mknod(backingFsBlockDev, unix.S_IFBLK|0600, int(stat.Dev)); err != nil { + return "", fmt.Errorf("Failed to mknod %s: %v", backingFsBlockDev, err) + } + + return backingFsBlockDev, nil +} diff --git a/vendor/github.com/containers/storage/drivers/register/register_aufs.go b/vendor/github.com/containers/storage/drivers/register/register_aufs.go new file mode 100644 index 0000000..7743dce --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/register/register_aufs.go @@ -0,0 +1,8 @@ +// +build !exclude_graphdriver_aufs,linux + +package register + +import ( + // register the aufs graphdriver + _ "github.com/containers/storage/drivers/aufs" +) diff --git a/vendor/github.com/containers/storage/drivers/register/register_btrfs.go b/vendor/github.com/containers/storage/drivers/register/register_btrfs.go new file mode 100644 index 0000000..40ff1cd --- /dev/null +++ 
b/vendor/github.com/containers/storage/drivers/register/register_btrfs.go @@ -0,0 +1,8 @@ +// +build !exclude_graphdriver_btrfs,linux + +package register + +import ( + // register the btrfs graphdriver + _ "github.com/containers/storage/drivers/btrfs" +) diff --git a/vendor/github.com/containers/storage/drivers/register/register_devicemapper.go b/vendor/github.com/containers/storage/drivers/register/register_devicemapper.go new file mode 100644 index 0000000..08ac984 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/register/register_devicemapper.go @@ -0,0 +1,8 @@ +// +build !exclude_graphdriver_devicemapper,linux + +package register + +import ( + // register the devmapper graphdriver + _ "github.com/containers/storage/drivers/devmapper" +) diff --git a/vendor/github.com/containers/storage/drivers/register/register_overlay.go b/vendor/github.com/containers/storage/drivers/register/register_overlay.go new file mode 100644 index 0000000..2d61219 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/register/register_overlay.go @@ -0,0 +1,8 @@ +// +build !exclude_graphdriver_overlay,linux + +package register + +import ( + // register the overlay graphdriver + _ "github.com/containers/storage/drivers/overlay" +) diff --git a/vendor/github.com/containers/storage/drivers/register/register_vfs.go b/vendor/github.com/containers/storage/drivers/register/register_vfs.go new file mode 100644 index 0000000..691ce85 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/register/register_vfs.go @@ -0,0 +1,6 @@ +package register + +import ( + // register vfs + _ "github.com/containers/storage/drivers/vfs" +) diff --git a/vendor/github.com/containers/storage/drivers/register/register_windows.go b/vendor/github.com/containers/storage/drivers/register/register_windows.go new file mode 100644 index 0000000..048b270 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/register/register_windows.go @@ -0,0 +1,6 @@ +package register + +import ( + // register the windows graph driver + _ "github.com/containers/storage/drivers/windows" +) diff --git a/vendor/github.com/containers/storage/drivers/register/register_zfs.go b/vendor/github.com/containers/storage/drivers/register/register_zfs.go new file mode 100644 index 0000000..c748468 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/register/register_zfs.go @@ -0,0 +1,8 @@ +// +build !exclude_graphdriver_zfs,linux !exclude_graphdriver_zfs,freebsd, solaris + +package register + +import ( + // register the zfs driver + _ "github.com/containers/storage/drivers/zfs" +) diff --git a/vendor/github.com/containers/storage/drivers/template.go b/vendor/github.com/containers/storage/drivers/template.go new file mode 100644 index 0000000..dfcbffb --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/template.go @@ -0,0 +1,45 @@ +package graphdriver + +import ( + "github.com/sirupsen/logrus" + + "github.com/containers/storage/pkg/idtools" +) + +// TemplateDriver is just barely enough of a driver that we can implement a +// naive version of CreateFromTemplate on top of it. +type TemplateDriver interface { + DiffDriver + CreateReadWrite(id, parent string, opts *CreateOpts) error + Create(id, parent string, opts *CreateOpts) error + Remove(id string) error +} + +// CreateFromTemplate creates a layer with the same contents and parent as +// another layer. Internally, it may even depend on that other layer +// continuing to exist, as if it were actually a child of the child layer. 
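A linear sketch of what NaiveCreateFromTemplate below composes from the driver primitives: create an empty layer, stream the template's diff, apply it to the new id. All names are illustrative, nil ID mappings stand in for the real template/parent mappings, opts is assumed non-nil, and the error cleanup the real function performs is elided:

package sketch // illustration only, not part of the vendored tree

import (
	graphdriver "github.com/containers/storage/drivers"
)

func cloneLayer(d graphdriver.TemplateDriver, id, template, parent string, opts *graphdriver.CreateOpts) error {
	// 1. make a new (empty) layer with the requested parent
	if err := d.Create(id, parent, opts); err != nil {
		return err
	}
	// 2. compute the template's changes relative to that same parent
	diff, err := d.Diff(template, nil, parent, nil, opts.MountLabel)
	if err != nil {
		return err
	}
	// 3. replay those changes onto the new layer
	_, err = d.ApplyDiff(id, nil, parent, opts.MountLabel, diff)
	return err
}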
+func NaiveCreateFromTemplate(d TemplateDriver, id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *CreateOpts, readWrite bool) error { + var err error + if readWrite { + err = d.CreateReadWrite(id, parent, opts) + } else { + err = d.Create(id, parent, opts) + } + if err != nil { + return err + } + diff, err := d.Diff(template, templateIDMappings, parent, parentIDMappings, opts.MountLabel) + if err != nil { + if err2 := d.Remove(id); err2 != nil { + logrus.Errorf("error removing layer %q: %v", id, err2) + } + return err + } + if _, err = d.ApplyDiff(id, templateIDMappings, parent, opts.MountLabel, diff); err != nil { + if err2 := d.Remove(id); err2 != nil { + logrus.Errorf("error removing layer %q: %v", id, err2) + } + return err + } + return nil +} diff --git a/vendor/github.com/containers/storage/drivers/vfs/copy_linux.go b/vendor/github.com/containers/storage/drivers/vfs/copy_linux.go new file mode 100644 index 0000000..8137fcf --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/vfs/copy_linux.go @@ -0,0 +1,7 @@ +package vfs + +import "github.com/containers/storage/drivers/copy" + +func dirCopy(srcDir, dstDir string) error { + return copy.DirCopy(srcDir, dstDir, copy.Content, false) +} diff --git a/vendor/github.com/containers/storage/drivers/vfs/copy_unsupported.go b/vendor/github.com/containers/storage/drivers/vfs/copy_unsupported.go new file mode 100644 index 0000000..8ac80ee --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/vfs/copy_unsupported.go @@ -0,0 +1,9 @@ +// +build !linux + +package vfs // import "github.com/containers/storage/drivers/vfs" + +import "github.com/containers/storage/pkg/chrootarchive" + +func dirCopy(srcDir, dstDir string) error { + return chrootarchive.NewArchiver(nil).CopyWithTar(srcDir, dstDir) +} diff --git a/vendor/github.com/containers/storage/drivers/vfs/driver.go b/vendor/github.com/containers/storage/drivers/vfs/driver.go new file mode 100644 index 0000000..9e25685 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/vfs/driver.go @@ -0,0 +1,226 @@ +package vfs + +import ( + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/containers/storage/drivers" + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/ostree" + "github.com/containers/storage/pkg/system" + "github.com/opencontainers/selinux/go-selinux/label" +) + +var ( + // CopyDir defines the copy method to use. + CopyDir = dirCopy +) + +func init() { + graphdriver.Register("vfs", Init) +} + +// Init returns a new VFS driver. +// This sets the home directory for the driver and returns NaiveDiffDriver. +func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { + d := &Driver{ + homes: []string{home}, + idMappings: idtools.NewIDMappingsFromMaps(uidMaps, gidMaps), + } + rootIDs := d.idMappings.RootPair() + if err := idtools.MkdirAllAndChown(home, 0700, rootIDs); err != nil { + return nil, err + } + for _, option := range options { + if strings.HasPrefix(option, "vfs.imagestore=") { + d.homes = append(d.homes, strings.Split(option[15:], ",")...) + continue + } + if strings.HasPrefix(option, ".imagestore=") { + d.homes = append(d.homes, strings.Split(option[12:], ",")...) 
+ continue + } + if strings.HasPrefix(option, "vfs.ostree_repo=") { + if !ostree.OstreeSupport() { + return nil, fmt.Errorf("vfs: ostree_repo specified but support for ostree is missing") + } + d.ostreeRepo = option[16:] + } + if strings.HasPrefix(option, ".ostree_repo=") { + if !ostree.OstreeSupport() { + return nil, fmt.Errorf("vfs: ostree_repo specified but support for ostree is missing") + } + d.ostreeRepo = option[13:] + } + if strings.HasPrefix(option, "vfs.mountopt=") { + return nil, fmt.Errorf("vfs driver does not support mount options") + } + } + if d.ostreeRepo != "" { + rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) + if err != nil { + return nil, err + } + if err := ostree.CreateOSTreeRepository(d.ostreeRepo, rootUID, rootGID); err != nil { + return nil, err + } + } + return graphdriver.NewNaiveDiffDriver(d, graphdriver.NewNaiveLayerIDMapUpdater(d)), nil +} + +// Driver holds information about the driver, home directory of the driver. +// Driver implements graphdriver.ProtoDriver. It uses only basic vfs operations. +// In order to support layering, files are copied from the parent layer into the new layer. There is no copy-on-write support. +// Driver must be wrapped in NaiveDiffDriver to be used as a graphdriver.Driver +type Driver struct { + homes []string + idMappings *idtools.IDMappings + ostreeRepo string +} + +func (d *Driver) String() string { + return "vfs" +} + +// Status is used for implementing the graphdriver.ProtoDriver interface. VFS does not currently have any status information. +func (d *Driver) Status() [][2]string { + return nil +} + +// Metadata is used for implementing the graphdriver.ProtoDriver interface. VFS does not currently have any meta data. +func (d *Driver) Metadata(id string) (map[string]string, error) { + return nil, nil +} + +// Cleanup is used to implement graphdriver.ProtoDriver. There is no cleanup required for this driver. +func (d *Driver) Cleanup() error { + return nil +} + +// CreateFromTemplate creates a layer with the same contents and parent as another layer. +func (d *Driver) CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *graphdriver.CreateOpts, readWrite bool) error { + if readWrite { + return d.CreateReadWrite(id, template, opts) + } + return d.Create(id, template, opts) +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. +func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + return d.create(id, parent, opts, false) +} + +// Create prepares the filesystem for the VFS driver and copies the directory for the given id under the parent. 
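Because vfs has no copy-on-write, a child layer is a full recursive copy of its parent; create() below mounts nothing and shares nothing. A minimal end-to-end sketch against the exported entry points shown above, with an invented home path and ids, the ostree options left off, and root privileges assumed (the default ID mappings chown to 0:0):

package main

import (
	"log"

	graphdriver "github.com/containers/storage/drivers"
	"github.com/containers/storage/drivers/vfs"
)

func main() {
	d, err := vfs.Init("/tmp/vfs-home", nil, nil, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer d.Cleanup()
	if err := d.Create("base", "", nil); err != nil { // no parent: empty layer
		log.Fatal(err)
	}
	// "child" starts as a byte-for-byte copy of "base"; later writes diverge.
	if err := d.CreateReadWrite("child", "base", &graphdriver.CreateOpts{}); err != nil {
		log.Fatal(err)
	}
	dir, err := d.Get("child", graphdriver.MountOpts{})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("child root:", dir)
}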
+func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { + return d.create(id, parent, opts, true) +} + +func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, ro bool) error { + if opts != nil && len(opts.StorageOpt) != 0 { + return fmt.Errorf("--storage-opt is not supported for vfs") + } + + idMappings := d.idMappings + if opts != nil && opts.IDMappings != nil { + idMappings = opts.IDMappings + } + + dir := d.dir(id) + rootIDs := idMappings.RootPair() + if err := idtools.MkdirAllAndChown(filepath.Dir(dir), 0700, rootIDs); err != nil { + return err + } + if parent != "" { + st, err := system.Stat(d.dir(parent)) + if err != nil { + return err + } + rootIDs.UID = int(st.UID()) + rootIDs.GID = int(st.GID()) + } + if err := idtools.MkdirAndChown(dir, 0755, rootIDs); err != nil { + return err + } + labelOpts := []string{"level:s0"} + if _, mountLabel, err := label.InitLabels(labelOpts); err == nil { + label.SetFileLabel(dir, mountLabel) + } + if parent != "" { + parentDir, err := d.Get(parent, graphdriver.MountOpts{}) + if err != nil { + return fmt.Errorf("%s: %s", parent, err) + } + if err := dirCopy(parentDir, dir); err != nil { + return err + } + } + + if ro && d.ostreeRepo != "" { + if err := ostree.ConvertToOSTree(d.ostreeRepo, dir, id); err != nil { + return err + } + } + return nil + +} + +func (d *Driver) dir(id string) string { + for i, home := range d.homes { + if i > 0 { + home = filepath.Join(home, d.String()) + } + candidate := filepath.Join(home, "dir", filepath.Base(id)) + fi, err := os.Stat(candidate) + if err == nil && fi.IsDir() { + return candidate + } + } + return filepath.Join(d.homes[0], "dir", filepath.Base(id)) +} + +// Remove deletes the content from the directory for a given id. +func (d *Driver) Remove(id string) error { + if d.ostreeRepo != "" { + // Ignore errors, we don't want to fail if the ostree branch doesn't exist, + ostree.DeleteOSTree(d.ostreeRepo, id) + } + return system.EnsureRemoveAll(d.dir(id)) +} + +// Get returns the directory for the given id. +func (d *Driver) Get(id string, options graphdriver.MountOpts) (_ string, retErr error) { + dir := d.dir(id) + if len(options.Options) > 0 { + return "", fmt.Errorf("vfs driver does not support mount options") + } + if st, err := os.Stat(dir); err != nil { + return "", err + } else if !st.IsDir() { + return "", fmt.Errorf("%s: not a directory", dir) + } + return dir, nil +} + +// Put is a noop for vfs that return nil for the error, since this driver has no runtime resources to clean up. +func (d *Driver) Put(id string) error { + // The vfs driver has no runtime resources (e.g. mounts) + // to clean up, so we don't need anything here + return nil +} + +// Exists checks to see if the directory exists for the given id. 
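dir() above tries each additional image store (with the driver name appended) before falling back to the first, writable home, and AdditionalImageStores() below exposes those extra homes. A small sketch of the candidate order, with invented paths:

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// homes[0] is the writable store; later entries came from vfs.imagestore=
	homes := []string{"/tmp/vfs-home", "/usr/share/containers/storage"}
	id := "exampleid"
	for i, home := range homes {
		if i > 0 {
			home = filepath.Join(home, "vfs") // d.String() is appended for extra stores
		}
		fmt.Println("candidate:", filepath.Join(home, "dir", filepath.Base(id)))
	}
}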
+func (d *Driver) Exists(id string) bool { + _, err := os.Stat(d.dir(id)) + return err == nil +} + +// AdditionalImageStores returns additional image stores supported by the driver +func (d *Driver) AdditionalImageStores() []string { + if len(d.homes) > 1 { + return d.homes[1:] + } + return nil +} diff --git a/vendor/github.com/containers/storage/drivers/windows/windows.go b/vendor/github.com/containers/storage/drivers/windows/windows.go new file mode 100644 index 0000000..c7df1c1 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/windows/windows.go @@ -0,0 +1,992 @@ +//+build windows + +package windows + +import ( + "bufio" + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path" + "path/filepath" + "strconv" + "strings" + "sync" + "syscall" + "time" + "unsafe" + + "github.com/Microsoft/go-winio" + "github.com/Microsoft/go-winio/archive/tar" + "github.com/Microsoft/go-winio/backuptar" + "github.com/Microsoft/hcsshim" + "github.com/containers/storage/drivers" + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/ioutils" + "github.com/containers/storage/pkg/longpath" + "github.com/containers/storage/pkg/reexec" + "github.com/containers/storage/pkg/system" + units "github.com/docker/go-units" + "github.com/sirupsen/logrus" + "golang.org/x/sys/windows" +) + +// filterDriver is an HCSShim driver type for the Windows Filter driver. +const filterDriver = 1 + +var ( + // mutatedFiles is a list of files that are mutated by the import process + // and must be backed up and restored. + mutatedFiles = map[string]string{ + "UtilityVM/Files/EFI/Microsoft/Boot/BCD": "bcd.bak", + "UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG": "bcd.log.bak", + "UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG1": "bcd.log1.bak", + "UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG2": "bcd.log2.bak", + } + noreexec = false +) + +// init registers the windows graph drivers to the register. +func init() { + graphdriver.Register("windowsfilter", InitFilter) + // DOCKER_WINDOWSFILTER_NOREEXEC allows for inline processing which makes + // debugging issues in the re-exec codepath significantly easier. + if os.Getenv("DOCKER_WINDOWSFILTER_NOREEXEC") != "" { + logrus.Warnf("WindowsGraphDriver is set to not re-exec. This is intended for debugging purposes only.") + noreexec = true + } else { + reexec.Register("docker-windows-write-layer", writeLayerReexec) + } +} + +type checker struct { +} + +func (c *checker) IsMounted(path string) bool { + return false +} + +// Driver represents a windows graph driver. +type Driver struct { + // info stores the shim driver information + info hcsshim.DriverInfo + ctr *graphdriver.RefCounter + // it is safe for windows to use a cache here because it does not support + // restoring containers when the daemon dies. + cacheMu sync.Mutex + cache map[string]string +} + +// InitFilter returns a new Windows storage filter driver. 
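InitFilter below refuses homes on ReFS, probing the volume type through a raw Syscall9 wrapper around GetVolumeInformationW. For comparison, the same probe through the typed wrapper in golang.org/x/sys/windows might look like this (windows-only sketch; the drive letter is invented):

//go:build windows

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/windows"
)

func main() {
	root, err := windows.UTF16PtrFromString(`C:\`)
	if err != nil {
		log.Fatal(err)
	}
	var serial, maxComp, fsFlags uint32
	fsName := make([]uint16, 64)
	if err := windows.GetVolumeInformation(root, nil, 0, &serial, &maxComp, &fsFlags,
		&fsName[0], uint32(len(fsName))); err != nil {
		log.Fatal(err)
	}
	// e.g. "NTFS"; InitFilter errors out when this is "refs" (case-insensitive).
	fmt.Println("filesystem:", windows.UTF16ToString(fsName))
}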
+func InitFilter(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { + logrus.Debugf("WindowsGraphDriver InitFilter at %s", home) + + for _, option := range options { + if strings.HasPrefix(option, "windows.mountopt=") { + return nil, fmt.Errorf("windows driver does not support mount options") + } else { + return nil, fmt.Errorf("option %s not supported", option) + } + } + + fsType, err := getFileSystemType(string(home[0])) + if err != nil { + return nil, err + } + if strings.ToLower(fsType) == "refs" { + return nil, fmt.Errorf("%s is on an ReFS volume - ReFS volumes are not supported", home) + } + + if err := idtools.MkdirAllAs(home, 0700, 0, 0); err != nil { + return nil, fmt.Errorf("windowsfilter failed to create '%s': %v", home, err) + } + + d := &Driver{ + info: hcsshim.DriverInfo{ + HomeDir: home, + Flavour: filterDriver, + }, + cache: make(map[string]string), + ctr: graphdriver.NewRefCounter(&checker{}), + } + return d, nil +} + +// win32FromHresult is a helper function to get the win32 error code from an HRESULT +func win32FromHresult(hr uintptr) uintptr { + if hr&0x1fff0000 == 0x00070000 { + return hr & 0xffff + } + return hr +} + +// getFileSystemType obtains the type of a file system through GetVolumeInformation +// https://msdn.microsoft.com/en-us/library/windows/desktop/aa364993(v=vs.85).aspx +func getFileSystemType(drive string) (fsType string, hr error) { + var ( + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + procGetVolumeInformation = modkernel32.NewProc("GetVolumeInformationW") + buf = make([]uint16, 255) + size = windows.MAX_PATH + 1 + ) + if len(drive) != 1 { + hr = errors.New("getFileSystemType must be called with a drive letter") + return + } + drive += `:\` + n := uintptr(unsafe.Pointer(nil)) + r0, _, _ := syscall.Syscall9(procGetVolumeInformation.Addr(), 8, uintptr(unsafe.Pointer(windows.StringToUTF16Ptr(drive))), n, n, n, n, n, uintptr(unsafe.Pointer(&buf[0])), uintptr(size), 0) + if int32(r0) < 0 { + hr = syscall.Errno(win32FromHresult(r0)) + } + fsType = windows.UTF16ToString(buf) + return +} + +// String returns the string representation of a driver. This should match +// the name the graph driver has been registered with. +func (d *Driver) String() string { + return "windowsfilter" +} + +// Status returns the status of the driver. +func (d *Driver) Status() [][2]string { + return [][2]string{ + {"Windows", ""}, + } +} + +// panicIfUsedByLcow does exactly what it says. +// TODO @jhowardmsft - this is a temporary measure for the bring-up of +// Linux containers on Windows. It is a failsafe to ensure that the right +// graphdriver is used. +func panicIfUsedByLcow() { + if system.LCOWSupported() { + panic("inconsistency - windowsfilter graphdriver should not be used when in LCOW mode") + } +} + +// Exists returns true if the given id is registered with this driver. +func (d *Driver) Exists(id string) bool { + panicIfUsedByLcow() + rID, err := d.resolveID(id) + if err != nil { + return false + } + result, err := hcsshim.LayerExists(d.info, rID) + if err != nil { + return false + } + return result +} + +// CreateFromTemplate creates a layer with the same contents and parent as another layer. 
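Stepping back to the init() registration above: layer writes normally run in a re-exec'd copy of the daemon binary registered as "docker-windows-write-layer". A minimal sketch of that reexec pattern with an invented token name; reexec.Register, reexec.Init, and reexec.Command are the package's real entry points:

package main

import (
	"fmt"
	"os"

	"github.com/containers/storage/pkg/reexec"
)

func init() {
	// Token name invented for this sketch; the vendored code registers
	// "docker-windows-write-layer" with writeLayerReexec.
	reexec.Register("example-child", func() {
		fmt.Println("running in the re-exec'd child")
		os.Exit(0)
	})
}

func main() {
	if reexec.Init() { // true when this process was started as a registered child
		return
	}
	cmd := reexec.Command("example-child")
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}

CreateFromTemplate below, by contrast, needs no Windows-specific logic and simply delegates to the naive implementation sketched earlier.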
+func (d *Driver) CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *graphdriver.CreateOpts, readWrite bool) error { + return graphdriver.NaiveCreateFromTemplate(d, id, template, templateIDMappings, parent, parentIDMappings, opts, readWrite) +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. +func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + panicIfUsedByLcow() + if opts != nil { + return d.create(id, parent, opts.MountLabel, false, opts.StorageOpt) + } + return d.create(id, parent, "", false, nil) +} + +// Create creates a new read-only layer with the given id. +func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { + panicIfUsedByLcow() + if opts != nil { + return d.create(id, parent, opts.MountLabel, true, opts.StorageOpt) + } + return d.create(id, parent, "", true, nil) +} + +func (d *Driver) create(id, parent, mountLabel string, readOnly bool, storageOpt map[string]string) error { + rPId, err := d.resolveID(parent) + if err != nil { + return err + } + + parentChain, err := d.getLayerChain(rPId) + if err != nil { + return err + } + + var layerChain []string + + if rPId != "" { + parentPath, err := hcsshim.GetLayerMountPath(d.info, rPId) + if err != nil { + return err + } + if _, err := os.Stat(filepath.Join(parentPath, "Files")); err == nil { + // This is a legitimate parent layer (not the empty "-init" layer), + // so include it in the layer chain. + layerChain = []string{parentPath} + } + } + + layerChain = append(layerChain, parentChain...) + + if readOnly { + if err := hcsshim.CreateLayer(d.info, id, rPId); err != nil { + return err + } + } else { + var parentPath string + if len(layerChain) != 0 { + parentPath = layerChain[0] + } + + if err := hcsshim.CreateSandboxLayer(d.info, id, parentPath, layerChain); err != nil { + return err + } + + storageOptions, err := parseStorageOpt(storageOpt) + if err != nil { + return fmt.Errorf("Failed to parse storage options - %s", err) + } + + if storageOptions.size != 0 { + if err := hcsshim.ExpandSandboxSize(d.info, id, storageOptions.size); err != nil { + return err + } + } + } + + if _, err := os.Lstat(d.dir(parent)); err != nil { + if err2 := hcsshim.DestroyLayer(d.info, id); err2 != nil { + logrus.Warnf("Failed to DestroyLayer %s: %s", id, err2) + } + return fmt.Errorf("Cannot create layer with missing parent %s: %s", parent, err) + } + + if err := d.setLayerChain(id, layerChain); err != nil { + if err2 := hcsshim.DestroyLayer(d.info, id); err2 != nil { + logrus.Warnf("Failed to DestroyLayer %s: %s", id, err2) + } + return err + } + + return nil +} + +// dir returns the absolute path to the layer. +func (d *Driver) dir(id string) string { + return filepath.Join(d.info.HomeDir, filepath.Base(id)) +} + +// Remove unmounts and removes the dir information. +func (d *Driver) Remove(id string) error { + panicIfUsedByLcow() + rID, err := d.resolveID(id) + if err != nil { + return err + } + + // This retry loop is due to a bug in Windows (Internal bug #9432268) + // if GetContainers fails with ErrVmcomputeOperationInvalidState + // it is a transient error. Retry until it succeeds. + var computeSystems []hcsshim.ContainerProperties + retryCount := 0 + osv := system.GetOSVersion() + for { + // Get and terminate any template VMs that are currently using the layer. 
+ // Note: It is unfortunate that we end up in the graphdrivers Remove() call + // for both containers and images, but the logic for template VMs is only + // needed for images - specifically we are looking to see if a base layer + // is in use by a template VM as a result of having started a Hyper-V + // container at some point. + // + // We have a retry loop for ErrVmcomputeOperationInvalidState and + // ErrVmcomputeOperationAccessIsDenied as there is a race condition + // in RS1 and RS2 building during enumeration when a silo is going away + // for example under it, in HCS. AccessIsDenied added to fix 30278. + // + // TODO @jhowardmsft - For RS3, we can remove the retries. Also consider + // using platform APIs (if available) to get this more succinctly. Also + // consider enhancing the Remove() interface to have context of why + // the remove is being called - that could improve efficiency by not + // enumerating compute systems during a remove of a container as it's + // not required. + computeSystems, err = hcsshim.GetContainers(hcsshim.ComputeSystemQuery{}) + if err != nil { + if (osv.Build < 15139) && + ((err == hcsshim.ErrVmcomputeOperationInvalidState) || (err == hcsshim.ErrVmcomputeOperationAccessIsDenied)) { + if retryCount >= 500 { + break + } + retryCount++ + time.Sleep(10 * time.Millisecond) + continue + } + return err + } + break + } + + for _, computeSystem := range computeSystems { + if strings.Contains(computeSystem.RuntimeImagePath, id) && computeSystem.IsRuntimeTemplate { + container, err := hcsshim.OpenContainer(computeSystem.ID) + if err != nil { + return err + } + defer container.Close() + err = container.Terminate() + if hcsshim.IsPending(err) { + err = container.Wait() + } else if hcsshim.IsAlreadyStopped(err) { + err = nil + } + + if err != nil { + return err + } + } + } + + layerPath := filepath.Join(d.info.HomeDir, rID) + tmpID := fmt.Sprintf("%s-removing", rID) + tmpLayerPath := filepath.Join(d.info.HomeDir, tmpID) + if err := os.Rename(layerPath, tmpLayerPath); err != nil && !os.IsNotExist(err) { + return err + } + if err := hcsshim.DestroyLayer(d.info, tmpID); err != nil { + logrus.Errorf("Failed to DestroyLayer %s: %s", id, err) + } + + return nil +} + +// Get returns the rootfs path for the id. This will mount the dir at its given path. +func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) { + panicIfUsedByLcow() + logrus.Debugf("WindowsGraphDriver Get() id %s mountLabel %s", id, options.MountLabel) + var dir string + + if len(options.Options) > 0 { + return "", fmt.Errorf("windows driver does not support mount options") + } + rID, err := d.resolveID(id) + if err != nil { + return "", err + } + if count := d.ctr.Increment(rID); count > 1 { + return d.cache[rID], nil + } + + // Getting the layer paths must be done outside of the lock. 
+ layerChain, err := d.getLayerChain(rID) + if err != nil { + d.ctr.Decrement(rID) + return "", err + } + + if err := hcsshim.ActivateLayer(d.info, rID); err != nil { + d.ctr.Decrement(rID) + return "", err + } + if err := hcsshim.PrepareLayer(d.info, rID, layerChain); err != nil { + d.ctr.Decrement(rID) + if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil { + logrus.Warnf("Failed to Deactivate %s: %s", id, err) + } + return "", err + } + + mountPath, err := hcsshim.GetLayerMountPath(d.info, rID) + if err != nil { + d.ctr.Decrement(rID) + if err := hcsshim.UnprepareLayer(d.info, rID); err != nil { + logrus.Warnf("Failed to Unprepare %s: %s", id, err) + } + if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil { + logrus.Warnf("Failed to Deactivate %s: %s", id, err) + } + return "", err + } + d.cacheMu.Lock() + d.cache[rID] = mountPath + d.cacheMu.Unlock() + + // If the layer has a mount path, use that. Otherwise, use the + // folder path. + if mountPath != "" { + dir = mountPath + } else { + dir = d.dir(id) + } + + return dir, nil +} + +// Put adds a new layer to the driver. +func (d *Driver) Put(id string) error { + panicIfUsedByLcow() + logrus.Debugf("WindowsGraphDriver Put() id %s", id) + + rID, err := d.resolveID(id) + if err != nil { + return err + } + if count := d.ctr.Decrement(rID); count > 0 { + return nil + } + d.cacheMu.Lock() + _, exists := d.cache[rID] + delete(d.cache, rID) + d.cacheMu.Unlock() + + // If the cache was not populated, then the layer was left unprepared and deactivated + if !exists { + return nil + } + + if err := hcsshim.UnprepareLayer(d.info, rID); err != nil { + return err + } + return hcsshim.DeactivateLayer(d.info, rID) +} + +// Cleanup ensures the information the driver stores is properly removed. +// We use this opportunity to cleanup any -removing folders which may be +// still left if the daemon was killed while it was removing a layer. +func (d *Driver) Cleanup() error { + items, err := ioutil.ReadDir(d.info.HomeDir) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + + // Note we don't return an error below - it's possible the files + // are locked. However, next time around after the daemon exits, + // we likely will be able to to cleanup successfully. Instead we log + // warnings if there are errors. + for _, item := range items { + if item.IsDir() && strings.HasSuffix(item.Name(), "-removing") { + if err := hcsshim.DestroyLayer(d.info, item.Name()); err != nil { + logrus.Warnf("Failed to cleanup %s: %s", item.Name(), err) + } else { + logrus.Infof("Cleaned up %s", item.Name()) + } + } + } + + return nil +} + +// Diff produces an archive of the changes between the specified +// layer and its parent layer which may be "". 
+// Diff produces an archive of the changes between the specified
+// layer and its parent layer which may be "".
+// The layer should be mounted when calling this function.
+func (d *Driver) Diff(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (_ io.ReadCloser, err error) {
+	panicIfUsedByLcow()
+	rID, err := d.resolveID(id)
+	if err != nil {
+		return
+	}
+
+	layerChain, err := d.getLayerChain(rID)
+	if err != nil {
+		return
+	}
+
+	// The export requires the layer to be unprepared, so unprepare it here
+	// and re-prepare it once the export has finished.
+	if err := hcsshim.UnprepareLayer(d.info, rID); err != nil {
+		return nil, err
+	}
+	prepare := func() {
+		if err := hcsshim.PrepareLayer(d.info, rID, layerChain); err != nil {
+			logrus.Warnf("Failed to re-PrepareLayer %s: %s", rID, err)
+		}
+	}
+
+	arch, err := d.exportLayer(rID, layerChain)
+	if err != nil {
+		prepare()
+		return
+	}
+	return ioutils.NewReadCloserWrapper(arch, func() error {
+		err := arch.Close()
+		prepare()
+		return err
+	}), nil
+}
+
+// Changes produces a list of changes between the specified layer
+// and its parent layer. If parent is "", then all changes will be ADD changes.
+// The layer should not be mounted when calling this function.
+func (d *Driver) Changes(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) ([]archive.Change, error) {
+	panicIfUsedByLcow()
+	rID, err := d.resolveID(id)
+	if err != nil {
+		return nil, err
+	}
+	parentChain, err := d.getLayerChain(rID)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := hcsshim.ActivateLayer(d.info, rID); err != nil {
+		return nil, err
+	}
+	defer func() {
+		if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil {
+			logrus.Errorf("changes() failed to DeactivateLayer %s %s: %s", id, rID, err2)
+		}
+	}()
+
+	var changes []archive.Change
+	err = winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error {
+		r, err := hcsshim.NewLayerReader(d.info, id, parentChain)
+		if err != nil {
+			return err
+		}
+		defer r.Close()
+
+		for {
+			name, _, fileInfo, err := r.Next()
+			if err == io.EOF {
+				return nil
+			}
+			if err != nil {
+				return err
+			}
+			name = filepath.ToSlash(name)
+			if fileInfo == nil {
+				changes = append(changes, archive.Change{Path: name, Kind: archive.ChangeDelete})
+			} else {
+				// Currently there is no way to tell between an add and a modify.
+				changes = append(changes, archive.Change{Path: name, Kind: archive.ChangeModify})
+			}
+		}
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	return changes, nil
+}
+
+// ApplyDiff extracts the changeset from the given diff into the
+// layer with the specified id and parent, returning the size of the
+// new layer in bytes.
+// The layer should not be mounted when calling this function.
+func (d *Driver) ApplyDiff(id string, idMappings *idtools.IDMappings, parent, mountLabel string, diff io.Reader) (int64, error) {
+	panicIfUsedByLcow()
+	var layerChain []string
+	if parent != "" {
+		rPId, err := d.resolveID(parent)
+		if err != nil {
+			return 0, err
+		}
+		parentChain, err := d.getLayerChain(rPId)
+		if err != nil {
+			return 0, err
+		}
+		parentPath, err := hcsshim.GetLayerMountPath(d.info, rPId)
+		if err != nil {
+			return 0, err
+		}
+		layerChain = append(layerChain, parentPath)
+		layerChain = append(layerChain, parentChain...)
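+		// layerChain now lists the immediate parent's mount path first,
+		// followed by the ancestor paths recorded in the parent's
+		// layerchain.json (ordered newest to oldest).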
+ } + + size, err := d.importLayer(id, diff, layerChain) + if err != nil { + return 0, err + } + + if err = d.setLayerChain(id, layerChain); err != nil { + return 0, err + } + + return size, nil +} + +// DiffSize calculates the changes between the specified layer +// and its parent and returns the size in bytes of the changes +// relative to its base filesystem directory. +func (d *Driver) DiffSize(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (size int64, err error) { + panicIfUsedByLcow() + rPId, err := d.resolveID(parent) + if err != nil { + return + } + + changes, err := d.Changes(id, idMappings, rPId, parentMappings, mountLabel) + if err != nil { + return + } + + layerFs, err := d.Get(id, graphdriver.MountOpts{}) + if err != nil { + return + } + defer d.Put(id) + + return archive.ChangesSize(layerFs, changes), nil +} + +// Metadata returns custom driver information. +func (d *Driver) Metadata(id string) (map[string]string, error) { + panicIfUsedByLcow() + m := make(map[string]string) + m["dir"] = d.dir(id) + return m, nil +} + +func writeTarFromLayer(r hcsshim.LayerReader, w io.Writer) error { + t := tar.NewWriter(w) + for { + name, size, fileInfo, err := r.Next() + if err == io.EOF { + break + } + if err != nil { + return err + } + if fileInfo == nil { + // Write a whiteout file. + hdr := &tar.Header{ + Name: filepath.ToSlash(filepath.Join(filepath.Dir(name), archive.WhiteoutPrefix+filepath.Base(name))), + } + err := t.WriteHeader(hdr) + if err != nil { + return err + } + } else { + err = backuptar.WriteTarFileFromBackupStream(t, r, name, size, fileInfo) + if err != nil { + return err + } + } + } + return t.Close() +} + +// exportLayer generates an archive from a layer based on the given ID. +func (d *Driver) exportLayer(id string, parentLayerPaths []string) (io.ReadCloser, error) { + archive, w := io.Pipe() + go func() { + err := winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error { + r, err := hcsshim.NewLayerReader(d.info, id, parentLayerPaths) + if err != nil { + return err + } + + err = writeTarFromLayer(r, w) + cerr := r.Close() + if err == nil { + err = cerr + } + return err + }) + w.CloseWithError(err) + }() + + return archive, nil +} + +// writeBackupStreamFromTarAndSaveMutatedFiles reads data from a tar stream and +// writes it to a backup stream, and also saves any files that will be mutated +// by the import layer process to a backup location. 
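+// The set of files treated this way comes from the package-level mutatedFiles
+// map (tar entry name -> backup file name); the saved copies later let
+// fileGetCloserWithBackupPrivileges.Get serve the original, pre-import bytes.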
+func writeBackupStreamFromTarAndSaveMutatedFiles(buf *bufio.Writer, w io.Writer, t *tar.Reader, hdr *tar.Header, root string) (nextHdr *tar.Header, err error) { + var bcdBackup *os.File + var bcdBackupWriter *winio.BackupFileWriter + if backupPath, ok := mutatedFiles[hdr.Name]; ok { + bcdBackup, err = os.Create(filepath.Join(root, backupPath)) + if err != nil { + return nil, err + } + defer func() { + cerr := bcdBackup.Close() + if err == nil { + err = cerr + } + }() + + bcdBackupWriter = winio.NewBackupFileWriter(bcdBackup, false) + defer func() { + cerr := bcdBackupWriter.Close() + if err == nil { + err = cerr + } + }() + + buf.Reset(io.MultiWriter(w, bcdBackupWriter)) + } else { + buf.Reset(w) + } + + defer func() { + ferr := buf.Flush() + if err == nil { + err = ferr + } + }() + + return backuptar.WriteBackupStreamFromTarFile(buf, t, hdr) +} + +func writeLayerFromTar(r io.Reader, w hcsshim.LayerWriter, root string) (int64, error) { + t := tar.NewReader(r) + hdr, err := t.Next() + totalSize := int64(0) + buf := bufio.NewWriter(nil) + for err == nil { + base := path.Base(hdr.Name) + if strings.HasPrefix(base, archive.WhiteoutPrefix) { + name := path.Join(path.Dir(hdr.Name), base[len(archive.WhiteoutPrefix):]) + err = w.Remove(filepath.FromSlash(name)) + if err != nil { + return 0, err + } + hdr, err = t.Next() + } else if hdr.Typeflag == tar.TypeLink { + err = w.AddLink(filepath.FromSlash(hdr.Name), filepath.FromSlash(hdr.Linkname)) + if err != nil { + return 0, err + } + hdr, err = t.Next() + } else { + var ( + name string + size int64 + fileInfo *winio.FileBasicInfo + ) + name, size, fileInfo, err = backuptar.FileInfoFromHeader(hdr) + if err != nil { + return 0, err + } + err = w.Add(filepath.FromSlash(name), fileInfo) + if err != nil { + return 0, err + } + hdr, err = writeBackupStreamFromTarAndSaveMutatedFiles(buf, w, t, hdr, root) + totalSize += size + } + } + if err != io.EOF { + return 0, err + } + return totalSize, nil +} + +// importLayer adds a new layer to the tag and graph store based on the given data. +func (d *Driver) importLayer(id string, layerData io.Reader, parentLayerPaths []string) (size int64, err error) { + if !noreexec { + cmd := reexec.Command(append([]string{"docker-windows-write-layer", d.info.HomeDir, id}, parentLayerPaths...)...) + output := bytes.NewBuffer(nil) + cmd.Stdin = layerData + cmd.Stdout = output + cmd.Stderr = output + + if err = cmd.Start(); err != nil { + return + } + + if err = cmd.Wait(); err != nil { + return 0, fmt.Errorf("re-exec error: %v: output: %s", err, output) + } + + return strconv.ParseInt(output.String(), 10, 64) + } + return writeLayer(layerData, d.info.HomeDir, id, parentLayerPaths...) +} + +// writeLayerReexec is the re-exec entry point for writing a layer from a tar file +func writeLayerReexec() { + size, err := writeLayer(os.Stdin, os.Args[1], os.Args[2], os.Args[3:]...) + if err != nil { + fmt.Fprint(os.Stderr, err) + os.Exit(1) + } + fmt.Fprint(os.Stdout, size) +} + +// writeLayer writes a layer from a tar file. +func writeLayer(layerData io.Reader, home string, id string, parentLayerPaths ...string) (int64, error) { + err := winio.EnableProcessPrivileges([]string{winio.SeBackupPrivilege, winio.SeRestorePrivilege}) + if err != nil { + return 0, err + } + if noreexec { + defer func() { + if err := winio.DisableProcessPrivileges([]string{winio.SeBackupPrivilege, winio.SeRestorePrivilege}); err != nil { + // This should never happen, but just in case when in debugging mode. 
+				// See https://github.com/docker/docker/pull/28002#discussion_r86259241 for rationale.
+				panic("Failed to disable process privileges while in non re-exec mode")
+			}
+		}()
+	}
+
+	info := hcsshim.DriverInfo{
+		Flavour: filterDriver,
+		HomeDir: home,
+	}
+
+	w, err := hcsshim.NewLayerWriter(info, id, parentLayerPaths)
+	if err != nil {
+		return 0, err
+	}
+
+	size, err := writeLayerFromTar(layerData, w, filepath.Join(home, id))
+	if err != nil {
+		return 0, err
+	}
+
+	err = w.Close()
+	if err != nil {
+		return 0, err
+	}
+
+	return size, nil
+}
+
+// resolveID computes the layerID information based on the given id.
+func (d *Driver) resolveID(id string) (string, error) {
+	content, err := ioutil.ReadFile(filepath.Join(d.dir(id), "layerID"))
+	if os.IsNotExist(err) {
+		return id, nil
+	} else if err != nil {
+		return "", err
+	}
+	return string(content), nil
+}
+
+// setID stores the layerID on disk.
+func (d *Driver) setID(id, altID string) error {
+	return ioutil.WriteFile(filepath.Join(d.dir(id), "layerID"), []byte(altID), 0600)
+}
+
+// getLayerChain returns the layer chain information.
+func (d *Driver) getLayerChain(id string) ([]string, error) {
+	jPath := filepath.Join(d.dir(id), "layerchain.json")
+	content, err := ioutil.ReadFile(jPath)
+	if os.IsNotExist(err) {
+		return nil, nil
+	} else if err != nil {
+		return nil, fmt.Errorf("Unable to read layerchain file - %s", err)
+	}
+
+	var layerChain []string
+	err = json.Unmarshal(content, &layerChain)
+	if err != nil {
+		return nil, fmt.Errorf("Failed to unmarshal layerchain json - %s", err)
+	}
+
+	return layerChain, nil
+}
+
+// setLayerChain stores the layer chain information on disk.
+func (d *Driver) setLayerChain(id string, chain []string) error {
+	content, err := json.Marshal(&chain)
+	if err != nil {
+		return fmt.Errorf("Failed to marshal layerchain json - %s", err)
+	}
+
+	jPath := filepath.Join(d.dir(id), "layerchain.json")
+	err = ioutil.WriteFile(jPath, content, 0600)
+	if err != nil {
+		return fmt.Errorf("Unable to write layerchain file - %s", err)
+	}
+
+	return nil
+}
+
+type fileGetCloserWithBackupPrivileges struct {
+	path string
+}
+
+func (fg *fileGetCloserWithBackupPrivileges) Get(filename string) (io.ReadCloser, error) {
+	if backupPath, ok := mutatedFiles[filename]; ok {
+		return os.Open(filepath.Join(fg.path, backupPath))
+	}
+
+	var f *os.File
+	// Open the file while holding the Windows backup privilege. This ensures that the
+	// file can be opened even if the caller does not actually have access to it according
+	// to the security descriptor. Also use sequential file access to avoid depleting the
+	// standby list - Microsoft VSO Bug Tracker #9900466
+	err := winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error {
+		path := longpath.AddPrefix(filepath.Join(fg.path, filename))
+		p, err := windows.UTF16FromString(path)
+		if err != nil {
+			return err
+		}
+		const fileFlagSequentialScan = 0x08000000 // FILE_FLAG_SEQUENTIAL_SCAN
+		h, err := windows.CreateFile(&p[0], windows.GENERIC_READ, windows.FILE_SHARE_READ, nil, windows.OPEN_EXISTING, windows.FILE_FLAG_BACKUP_SEMANTICS|fileFlagSequentialScan, 0)
+		if err != nil {
+			return &os.PathError{Op: "open", Path: path, Err: err}
+		}
+		f = os.NewFile(uintptr(h), path)
+		return nil
+	})
+	return f, err
+}
+
+func (fg *fileGetCloserWithBackupPrivileges) Close() error {
+	return nil
+}
+
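+// A short usage sketch (editor's illustration; the file name is made up):
+// the getter serves a file out of a layer directory even when the file's ACL
+// would deny the caller access, by reading it under the backup privilege:
+//
+//	fg := &fileGetCloserWithBackupPrivileges{path: d.dir(id)}
+//	rc, err := fg.Get("Files/Windows/example.txt")
+//	if err == nil {
+//		defer rc.Close()
+//		// ... consume the original file contents ...
+//	}
+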
+// DiffGetter returns a FileGetCloser that can read files from the directory that
+// contains files for the layer differences. Used for direct access for tar-split.
+func (d *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) {
+	panicIfUsedByLcow()
+	id, err := d.resolveID(id)
+	if err != nil {
+		return nil, err
+	}
+
+	return &fileGetCloserWithBackupPrivileges{d.dir(id)}, nil
+}
+
+// AdditionalImageStores returns additional image stores supported by the driver
+func (d *Driver) AdditionalImageStores() []string {
+	return nil
+}
+
+// UpdateLayerIDMap changes ownerships in the layer's filesystem tree from
+// matching those in toContainer to matching those in toHost.
+func (d *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) error {
+	return fmt.Errorf("windows doesn't support changing ID mappings")
+}
+
+// SupportsShifting tells whether the driver supports shifting of the UIDs/GIDs in a userNS
+func (d *Driver) SupportsShifting() bool {
+	return false
+}
+
+type storageOptions struct {
+	size uint64
+}
+
+func parseStorageOpt(storageOpt map[string]string) (*storageOptions, error) {
+	options := storageOptions{}
+
+	// Read size to change the block device size per container.
+	for key, val := range storageOpt {
+		key := strings.ToLower(key)
+		switch key {
+		case "size":
+			size, err := units.RAMInBytes(val)
+			if err != nil {
+				return nil, err
+			}
+			options.size = uint64(size)
+		default:
+			return nil, fmt.Errorf("Unknown storage option: %s", key)
+		}
+	}
+	return &options, nil
+}
diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs.go b/vendor/github.com/containers/storage/drivers/zfs/zfs.go
new file mode 100644
index 0000000..eaa9e8b
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/zfs/zfs.go
@@ -0,0 +1,449 @@
+// +build linux freebsd
+
+package zfs
+
+import (
+	"fmt"
+	"os"
+	"os/exec"
+	"path"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/containers/storage/drivers"
+	"github.com/containers/storage/pkg/idtools"
+	"github.com/containers/storage/pkg/mount"
+	"github.com/containers/storage/pkg/parsers"
+	"github.com/mistifyio/go-zfs"
+	"github.com/opencontainers/selinux/go-selinux/label"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+	"golang.org/x/sys/unix"
+)
+
+type zfsOptions struct {
+	fsName       string
+	mountPath    string
+	mountOptions string
+}
+
+func init() {
+	graphdriver.Register("zfs", Init)
+}
+
+// Logger is a zfs logger implementation.
+type Logger struct{}
+
+// Log wraps log message from ZFS driver with a prefix '[zfs]'.
+func (*Logger) Log(cmd []string) {
+	logrus.WithField("storage-driver", "zfs").Debugf("%s", strings.Join(cmd, " "))
+}
+
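+// An illustrative invocation (editor's sketch; the base path and option
+// values are examples, not defaults):
+//
+//	drv, err := Init("/var/lib/containers/storage/zfs",
+//		[]string{"zfs.fsname=rpool/containers", "zfs.mountopt=noatime"},
+//		nil, nil)
+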
+// Init returns a new ZFS driver.
+// It takes base mount path and an array of options which are represented as key value pairs.
+// Each option is in the form key=value. 'zfs.fsname' is expected to be a valid key in the options.
+func Init(base string, opt []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
+	var err error
+
+	logger := logrus.WithField("storage-driver", "zfs")
+
+	if _, err := exec.LookPath("zfs"); err != nil {
+		logger.Debugf("zfs command is not available: %v", err)
+		return nil, errors.Wrap(graphdriver.ErrPrerequisites, "the 'zfs' command is not available")
+	}
+
+	file, err := os.OpenFile("/dev/zfs", os.O_RDWR, 0600)
+	if err != nil {
+		logger.Debugf("cannot open /dev/zfs: %v", err)
+		return nil, errors.Wrapf(graphdriver.ErrPrerequisites, "could not open /dev/zfs: %v", err)
+	}
+	defer file.Close()
+
+	options, err := parseOptions(opt)
+	if err != nil {
+		return nil, err
+	}
+	options.mountPath = base
+
+	rootdir := path.Dir(base)
+
+	if options.fsName == "" {
+		err = checkRootdirFs(rootdir)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	if options.fsName == "" {
+		options.fsName, err = lookupZfsDataset(rootdir)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	zfs.SetLogger(new(Logger))
+
+	filesystems, err := zfs.Filesystems(options.fsName)
+	if err != nil {
+		return nil, fmt.Errorf("Cannot find root filesystem %s: %v", options.fsName, err)
+	}
+
+	filesystemsCache := make(map[string]bool, len(filesystems))
+	var rootDataset *zfs.Dataset
+	for _, fs := range filesystems {
+		if fs.Name == options.fsName {
+			rootDataset = fs
+		}
+		filesystemsCache[fs.Name] = true
+	}
+
+	if rootDataset == nil {
+		return nil, fmt.Errorf("BUG: zfs get all -t filesystem -rHp '%s' should contain '%s'", options.fsName, options.fsName)
+	}
+
+	rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
+	if err != nil {
+		return nil, fmt.Errorf("Failed to get root uid/gid: %v", err)
+	}
+	if err := idtools.MkdirAllAs(base, 0700, rootUID, rootGID); err != nil {
+		return nil, fmt.Errorf("Failed to create '%s': %v", base, err)
+	}
+
+	d := &Driver{
+		dataset:          rootDataset,
+		options:          options,
+		filesystemsCache: filesystemsCache,
+		uidMaps:          uidMaps,
+		gidMaps:          gidMaps,
+		ctr:              graphdriver.NewRefCounter(graphdriver.NewDefaultChecker()),
+	}
+	return graphdriver.NewNaiveDiffDriver(d, graphdriver.NewNaiveLayerIDMapUpdater(d)), nil
+}
+
+func parseOptions(opt []string) (zfsOptions, error) {
+	var options zfsOptions
+	options.fsName = ""
+	for _, option := range opt {
+		key, val, err := parsers.ParseKeyValueOpt(option)
+		if err != nil {
+			return options, err
+		}
+		key = strings.ToLower(key)
+		switch key {
+		case "zfs.fsname":
+			options.fsName = val
+		case "zfs.mountopt":
+			options.mountOptions = val
+		default:
+			return options, fmt.Errorf("Unknown option %s", key)
+		}
+	}
+	return options, nil
+}
+
+func lookupZfsDataset(rootdir string) (string, error) {
+	var stat unix.Stat_t
+	if err := unix.Stat(rootdir, &stat); err != nil {
+		return "", fmt.Errorf("Failed to access '%s': %s", rootdir, err)
+	}
+	wantedDev := stat.Dev
+
+	mounts, err := mount.GetMounts()
+	if err != nil {
+		return "", err
+	}
+	for _, m := range mounts {
+		if err := unix.Stat(m.Mountpoint, &stat); err != nil {
+			logrus.WithField("storage-driver", "zfs").Debugf("failed to stat '%s' while scanning for zfs mount: %v", m.Mountpoint, err)
+			continue // may fail on fuse file systems
+		}
+
+		if stat.Dev == wantedDev && m.Fstype == "zfs" {
+			return m.Source, nil
+		}
+	}
+
+	return "", fmt.Errorf("Failed to find zfs dataset mounted on '%s' in /proc/mounts", rootdir)
+}
+
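+// Note (editor's gloss): lookupZfsDataset resolves, for example, a rootdir of
+// /var/lib/containers/storage to the dataset (say, rpool/storage) whose zfs
+// mount covers that directory, by matching device numbers against the mount
+// table; the example paths here are illustrative only.
+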
+// Driver holds information about the driver, such as zfs dataset, options and cache.
+type Driver struct {
+	dataset          *zfs.Dataset
+	options          zfsOptions
+	sync.Mutex       // protects filesystem cache against concurrent access
+	filesystemsCache map[string]bool
+	uidMaps          []idtools.IDMap
+	gidMaps          []idtools.IDMap
+	ctr              *graphdriver.RefCounter
+}
+
+func (d *Driver) String() string {
+	return "zfs"
+}
+
+// Cleanup is called when the program exits; it is a no-op for ZFS.
+func (d *Driver) Cleanup() error {
+	return nil
+}
+
+// Status returns information about the ZFS filesystem. It returns a two dimensional array of information
+// such as pool name, dataset name, disk usage, parent quota and compression used.
+// Currently it returns 'Zpool', 'Zpool Health', 'Parent Dataset', 'Space Used By Parent',
+// 'Space Available', 'Parent Quota' and 'Compression'.
+func (d *Driver) Status() [][2]string {
+	parts := strings.Split(d.dataset.Name, "/")
+	pool, err := zfs.GetZpool(parts[0])
+
+	var poolName, poolHealth string
+	if err == nil {
+		poolName = pool.Name
+		poolHealth = pool.Health
+	} else {
+		poolName = fmt.Sprintf("error while getting pool information %v", err)
+		poolHealth = "not available"
+	}
+
+	quota := "no"
+	if d.dataset.Quota != 0 {
+		quota = strconv.FormatUint(d.dataset.Quota, 10)
+	}
+
+	return [][2]string{
+		{"Zpool", poolName},
+		{"Zpool Health", poolHealth},
+		{"Parent Dataset", d.dataset.Name},
+		{"Space Used By Parent", strconv.FormatUint(d.dataset.Used, 10)},
+		{"Space Available", strconv.FormatUint(d.dataset.Avail, 10)},
+		{"Parent Quota", quota},
+		{"Compression", d.dataset.Compression},
+	}
+}
+
+// Metadata returns image/container metadata related to graph driver
+func (d *Driver) Metadata(id string) (map[string]string, error) {
+	return map[string]string{
+		"Mountpoint": d.mountPath(id),
+		"Dataset":    d.zfsPath(id),
+	}, nil
+}
+
+func (d *Driver) cloneFilesystem(name, parentName string) error {
+	snapshotName := fmt.Sprintf("%d", time.Now().Nanosecond())
+	parentDataset := zfs.Dataset{Name: parentName}
+	snapshot, err := parentDataset.Snapshot(snapshotName, false /* recursive */)
+	if err != nil {
+		return err
+	}
+
+	_, err = snapshot.Clone(name, map[string]string{"mountpoint": "legacy"})
+	if err == nil {
+		d.Lock()
+		d.filesystemsCache[name] = true
+		d.Unlock()
+	}
+
+	if err != nil {
+		snapshot.Destroy(zfs.DestroyDeferDeletion)
+		return err
+	}
+	return snapshot.Destroy(zfs.DestroyDeferDeletion)
+}
+
+func (d *Driver) zfsPath(id string) string {
+	return d.options.fsName + "/" + id
+}
+
+func (d *Driver) mountPath(id string) string {
+	return path.Join(d.options.mountPath, "graph", getMountpoint(id))
+}
+
+// CreateFromTemplate creates a layer with the same contents and parent as another layer.
+func (d *Driver) CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *graphdriver.CreateOpts, readWrite bool) error {
+	return d.Create(id, template, opts)
+}
+
+// CreateReadWrite creates a layer that is writable for use as a container
+// file system.
+func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error {
+	return d.Create(id, parent, opts)
+}
+
+// Create prepares the dataset and filesystem for the ZFS driver for the given id under the parent.
+func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { + var storageOpt map[string]string + if opts != nil { + storageOpt = opts.StorageOpt + } + + err := d.create(id, parent, storageOpt) + if err == nil { + return nil + } + if zfsError, ok := err.(*zfs.Error); ok { + if !strings.HasSuffix(zfsError.Stderr, "dataset already exists\n") { + return err + } + // aborted build -> cleanup + } else { + return err + } + + dataset := zfs.Dataset{Name: d.zfsPath(id)} + if err := dataset.Destroy(zfs.DestroyRecursiveClones); err != nil { + return err + } + + // retry + return d.create(id, parent, storageOpt) +} + +func (d *Driver) create(id, parent string, storageOpt map[string]string) error { + name := d.zfsPath(id) + quota, err := parseStorageOpt(storageOpt) + if err != nil { + return err + } + if parent == "" { + mountoptions := map[string]string{"mountpoint": "legacy"} + fs, err := zfs.CreateFilesystem(name, mountoptions) + if err == nil { + err = setQuota(name, quota) + if err == nil { + d.Lock() + d.filesystemsCache[fs.Name] = true + d.Unlock() + } + } + return err + } + err = d.cloneFilesystem(name, d.zfsPath(parent)) + if err == nil { + err = setQuota(name, quota) + } + return err +} + +func parseStorageOpt(storageOpt map[string]string) (string, error) { + // Read size to change the disk quota per container + for k, v := range storageOpt { + key := strings.ToLower(k) + switch key { + case "size": + return v, nil + default: + return "0", fmt.Errorf("Unknown option %s", key) + } + } + return "0", nil +} + +func setQuota(name string, quota string) error { + if quota == "0" { + return nil + } + fs, err := zfs.GetDataset(name) + if err != nil { + return err + } + return fs.SetProperty("quota", quota) +} + +// Remove deletes the dataset, filesystem and the cache for the given id. +func (d *Driver) Remove(id string) error { + name := d.zfsPath(id) + dataset := zfs.Dataset{Name: name} + err := dataset.Destroy(zfs.DestroyRecursive) + if err == nil { + d.Lock() + delete(d.filesystemsCache, name) + d.Unlock() + } + return err +} + +// Get returns the mountpoint for the given id after creating the target directories if necessary. 
+func (d *Driver) Get(id string, options graphdriver.MountOpts) (_ string, retErr error) { + + mountpoint := d.mountPath(id) + if count := d.ctr.Increment(mountpoint); count > 1 { + return mountpoint, nil + } + defer func() { + if retErr != nil { + if c := d.ctr.Decrement(mountpoint); c <= 0 { + if mntErr := unix.Unmount(mountpoint, 0); mntErr != nil { + logrus.WithField("storage-driver", "zfs").Errorf("Error unmounting %v: %v", mountpoint, mntErr) + } + if rmErr := unix.Rmdir(mountpoint); rmErr != nil && !os.IsNotExist(rmErr) { + logrus.WithField("storage-driver", "zfs").Debugf("Failed to remove %s: %v", id, rmErr) + } + + } + } + }() + + mountOptions := d.options.mountOptions + if len(options.Options) > 0 { + mountOptions = strings.Join(options.Options, ",") + } + + filesystem := d.zfsPath(id) + opts := label.FormatMountLabel(mountOptions, options.MountLabel) + logrus.WithField("storage-driver", "zfs").Debugf(`mount("%s", "%s", "%s")`, filesystem, mountpoint, opts) + + rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + return "", err + } + // Create the target directories if they don't exist + if err := idtools.MkdirAllAs(mountpoint, 0755, rootUID, rootGID); err != nil { + return "", err + } + + if err := mount.Mount(filesystem, mountpoint, "zfs", opts); err != nil { + return "", errors.Wrap(err, "error creating zfs mount") + } + + // this could be our first mount after creation of the filesystem, and the root dir may still have root + // permissions instead of the remapped root uid:gid (if user namespaces are enabled): + if err := os.Chown(mountpoint, rootUID, rootGID); err != nil { + return "", fmt.Errorf("error modifying zfs mountpoint (%s) directory ownership: %v", mountpoint, err) + } + + return mountpoint, nil +} + +// Put removes the existing mountpoint for the given id if it exists. +func (d *Driver) Put(id string) error { + mountpoint := d.mountPath(id) + if count := d.ctr.Decrement(mountpoint); count > 0 { + return nil + } + + logger := logrus.WithField("storage-driver", "zfs") + + logger.Debugf(`unmount("%s")`, mountpoint) + + if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil { + logger.Warnf("Failed to unmount %s mount %s: %v", id, mountpoint, err) + } + if err := unix.Rmdir(mountpoint); err != nil && !os.IsNotExist(err) { + logger.Debugf("Failed to remove %s mount point %s: %v", id, mountpoint, err) + } + + return nil +} + +// Exists checks to see if the cache entry exists for the given id. +func (d *Driver) Exists(id string) bool { + d.Lock() + defer d.Unlock() + return d.filesystemsCache[d.zfsPath(id)] +} + +// AdditionalImageStores returns additional image stores supported by the driver +func (d *Driver) AdditionalImageStores() []string { + return nil +} diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go b/vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go new file mode 100644 index 0000000..bf69051 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go @@ -0,0 +1,39 @@ +package zfs + +import ( + "fmt" + "strings" + + "github.com/containers/storage/drivers" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +func checkRootdirFs(rootdir string) error { + var buf unix.Statfs_t + if err := unix.Statfs(rootdir, &buf); err != nil { + return fmt.Errorf("Failed to access '%s': %s", rootdir, err) + } + + // on FreeBSD buf.Fstypename contains ['z', 'f', 's', 0 ... 
] + if (buf.Fstypename[0] != 122) || (buf.Fstypename[1] != 102) || (buf.Fstypename[2] != 115) || (buf.Fstypename[3] != 0) { + logrus.WithField("storage-driver", "zfs").Debugf("no zfs dataset found for rootdir '%s'", rootdir) + return errors.Wrapf(graphdriver.ErrPrerequisites, "no zfs dataset found for rootdir '%s'", rootdir) + } + + return nil +} + +func getMountpoint(id string) string { + maxlen := 12 + + // we need to preserve filesystem suffix + suffix := strings.SplitN(id, "-", 2) + + if len(suffix) > 1 { + return id[:maxlen] + "-" + suffix[1] + } + + return id[:maxlen] +} diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go b/vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go new file mode 100644 index 0000000..fb1ef3a --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go @@ -0,0 +1,29 @@ +package zfs + +import ( + "github.com/containers/storage/drivers" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +func checkRootdirFs(rootDir string) error { + fsMagic, err := graphdriver.GetFSMagic(rootDir) + if err != nil { + return err + } + backingFS := "unknown" + if fsName, ok := graphdriver.FsNames[fsMagic]; ok { + backingFS = fsName + } + + if fsMagic != graphdriver.FsMagicZfs { + logrus.WithField("root", rootDir).WithField("backingFS", backingFS).WithField("storage-driver", "zfs").Error("No zfs dataset found for root") + return errors.Wrapf(graphdriver.ErrPrerequisites, "no zfs dataset found for rootdir '%s'", rootDir) + } + + return nil +} + +func getMountpoint(id string) string { + return id +} diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs_unsupported.go b/vendor/github.com/containers/storage/drivers/zfs/zfs_unsupported.go new file mode 100644 index 0000000..643b169 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/zfs/zfs_unsupported.go @@ -0,0 +1,11 @@ +// +build !linux,!freebsd + +package zfs + +func checkRootdirFs(rootdir string) error { + return nil +} + +func getMountpoint(id string) string { + return id +} diff --git a/vendor/github.com/containers/storage/errors.go b/vendor/github.com/containers/storage/errors.go new file mode 100644 index 0000000..63cb9ab --- /dev/null +++ b/vendor/github.com/containers/storage/errors.go @@ -0,0 +1,56 @@ +package storage + +import ( + "errors" +) + +var ( + // ErrContainerUnknown indicates that there was no container with the specified name or ID. + ErrContainerUnknown = errors.New("container not known") + // ErrImageUnknown indicates that there was no image with the specified name or ID. + ErrImageUnknown = errors.New("image not known") + // ErrParentUnknown indicates that we didn't record the ID of the parent of the specified layer. + ErrParentUnknown = errors.New("parent of layer not known") + // ErrLayerUnknown indicates that there was no layer with the specified name or ID. + ErrLayerUnknown = errors.New("layer not known") + // ErrLoadError indicates that there was an initialization error. + ErrLoadError = errors.New("error loading storage metadata") + // ErrDuplicateID indicates that an ID which is to be assigned to a new item is already being used. + ErrDuplicateID = errors.New("that ID is already in use") + // ErrDuplicateName indicates that a name which is to be assigned to a new item is already being used. + ErrDuplicateName = errors.New("that name is already in use") + // ErrParentIsContainer is returned when a caller attempts to create a layer as a child of a container's layer. 
+ ErrParentIsContainer = errors.New("would-be parent layer is a container") + // ErrNotAContainer is returned when the caller attempts to delete a container that isn't a container. + ErrNotAContainer = errors.New("identifier is not a container") + // ErrNotAnImage is returned when the caller attempts to delete an image that isn't an image. + ErrNotAnImage = errors.New("identifier is not an image") + // ErrNotALayer is returned when the caller attempts to delete a layer that isn't a layer. + ErrNotALayer = errors.New("identifier is not a layer") + // ErrNotAnID is returned when the caller attempts to read or write metadata from an item that doesn't exist. + ErrNotAnID = errors.New("identifier is not a layer, image, or container") + // ErrLayerHasChildren is returned when the caller attempts to delete a layer that has children. + ErrLayerHasChildren = errors.New("layer has children") + // ErrLayerUsedByImage is returned when the caller attempts to delete a layer that is an image's top layer. + ErrLayerUsedByImage = errors.New("layer is in use by an image") + // ErrLayerUsedByContainer is returned when the caller attempts to delete a layer that is a container's layer. + ErrLayerUsedByContainer = errors.New("layer is in use by a container") + // ErrImageUsedByContainer is returned when the caller attempts to delete an image that is a container's image. + ErrImageUsedByContainer = errors.New("image is in use by a container") + // ErrIncompleteOptions is returned when the caller attempts to initialize a Store without providing required information. + ErrIncompleteOptions = errors.New("missing necessary StoreOptions") + // ErrSizeUnknown is returned when the caller asks for the size of a big data item, but the Store couldn't determine the answer. + ErrSizeUnknown = errors.New("size is not known") + // ErrStoreIsReadOnly is returned when the caller makes a call to a read-only store that would require modifying its contents. + ErrStoreIsReadOnly = errors.New("called a write method on a read-only store") + // ErrDuplicateImageNames indicates that the read-only store uses the same name for multiple images. + ErrDuplicateImageNames = errors.New("read-only image store assigns the same name to multiple images") + // ErrDuplicateLayerNames indicates that the read-only store uses the same name for multiple layers. + ErrDuplicateLayerNames = errors.New("read-only layer store assigns the same name to multiple layers") + // ErrInvalidBigDataName indicates that the name for a big data item is not acceptable; it may be empty. + ErrInvalidBigDataName = errors.New("not a valid name for a big data item") + // ErrDigestUnknown indicates that we were unable to compute the digest of a specified item. + ErrDigestUnknown = errors.New("could not compute digest of item") + // ErrLayerNotMounted is returned when the requested information can only be computed for a mounted layer, and the layer is not mounted. 
+ ErrLayerNotMounted = errors.New("layer is not mounted") +) diff --git a/vendor/github.com/containers/storage/images.go b/vendor/github.com/containers/storage/images.go new file mode 100644 index 0000000..6f48750 --- /dev/null +++ b/vendor/github.com/containers/storage/images.go @@ -0,0 +1,777 @@ +package storage + +import ( + "encoding/json" + "io/ioutil" + "os" + "path/filepath" + "strings" + "time" + + "github.com/containers/storage/pkg/ioutils" + "github.com/containers/storage/pkg/stringid" + "github.com/containers/storage/pkg/truncindex" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +const ( + // ImageDigestManifestBigDataNamePrefix is a prefix of big data item + // names which we consider to be manifests, used for computing a + // "digest" value for the image as a whole, by which we can locate the + // image later. + ImageDigestManifestBigDataNamePrefix = "manifest" + // ImageDigestBigDataKey is provided for compatibility with older + // versions of the image library. It will be removed in the future. + ImageDigestBigDataKey = "manifest" +) + +// An Image is a reference to a layer and an associated metadata string. +type Image struct { + // ID is either one which was specified at create-time, or a random + // value which was generated by the library. + ID string `json:"id"` + + // Digest is a digest value that we can use to locate the image, if one + // was specified at creation-time. + Digest digest.Digest `json:"digest,omitempty"` + + // Digests is a list of digest values of the image's manifests, and + // possibly a manually-specified value, that we can use to locate the + // image. If Digest is set, its value is also in this list. + Digests []digest.Digest `json:"-"` + + // Names is an optional set of user-defined convenience values. The + // image can be referred to by its ID or any of its names. Names are + // unique among images, and are often the text representation of tagged + // or canonical references. + Names []string `json:"names,omitempty"` + + // TopLayer is the ID of the topmost layer of the image itself, if the + // image contains one or more layers. Multiple images can refer to the + // same top layer. + TopLayer string `json:"layer,omitempty"` + + // MappedTopLayers are the IDs of alternate versions of the top layer + // which have the same contents and parent, and which differ from + // TopLayer only in which ID mappings they use. When the image is + // to be removed, they should be removed before the TopLayer, as the + // graph driver may depend on that. + MappedTopLayers []string `json:"mapped-layers,omitempty"` + + // Metadata is data we keep for the convenience of the caller. It is not + // expected to be large, since it is kept in memory. + Metadata string `json:"metadata,omitempty"` + + // BigDataNames is a list of names of data items that we keep for the + // convenience of the caller. They can be large, and are only in + // memory when being read from or written to disk. + BigDataNames []string `json:"big-data-names,omitempty"` + + // BigDataSizes maps the names in BigDataNames to the sizes of the data + // that has been stored, if they're known. + BigDataSizes map[string]int64 `json:"big-data-sizes,omitempty"` + + // BigDataDigests maps the names in BigDataNames to the digests of the + // data that has been stored, if they're known. + BigDataDigests map[string]digest.Digest `json:"big-data-digests,omitempty"` + + // Created is the datestamp for when this image was created. 
Older + // versions of the library did not track this information, so callers + // will likely want to use the IsZero() method to verify that a value + // is set before using it. + Created time.Time `json:"created,omitempty"` + + // ReadOnly is true if this image resides in a read-only layer store. + ReadOnly bool `json:"-"` + + Flags map[string]interface{} `json:"flags,omitempty"` +} + +// ROImageStore provides bookkeeping for information about Images. +type ROImageStore interface { + ROFileBasedStore + ROMetadataStore + ROBigDataStore + + // Exists checks if there is an image with the given ID or name. + Exists(id string) bool + + // Get retrieves information about an image given an ID or name. + Get(id string) (*Image, error) + + // Lookup attempts to translate a name to an ID. Most methods do this + // implicitly. + Lookup(name string) (string, error) + + // Images returns a slice enumerating the known images. + Images() ([]Image, error) + + // ByDigest returns a slice enumerating the images which have either an + // explicitly-set digest, or a big data item with a name that starts + // with ImageDigestManifestBigDataNamePrefix, which matches the + // specified digest. + ByDigest(d digest.Digest) ([]*Image, error) +} + +// ImageStore provides bookkeeping for information about Images. +type ImageStore interface { + ROImageStore + RWFileBasedStore + RWMetadataStore + RWImageBigDataStore + FlaggableStore + + // Create creates an image that has a specified ID (or a random one) and + // optional names, using the specified layer as its topmost (hopefully + // read-only) layer. That layer can be referenced by multiple images. + Create(id string, names []string, layer, metadata string, created time.Time, searchableDigest digest.Digest) (*Image, error) + + // SetNames replaces the list of names associated with an image with the + // supplied values. The values are expected to be valid normalized + // named image references. + SetNames(id string, names []string) error + + // Delete removes the record of the image. + Delete(id string) error + + // Wipe removes records of all images. 
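+	// (The imageStore implementation deletes each image in turn, including
+	// its on-disk data items.)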
+ Wipe() error +} + +type imageStore struct { + lockfile Locker + dir string + images []*Image + idindex *truncindex.TruncIndex + byid map[string]*Image + byname map[string]*Image + bydigest map[digest.Digest][]*Image +} + +func copyImage(i *Image) *Image { + return &Image{ + ID: i.ID, + Digest: i.Digest, + Digests: copyDigestSlice(i.Digests), + Names: copyStringSlice(i.Names), + TopLayer: i.TopLayer, + MappedTopLayers: copyStringSlice(i.MappedTopLayers), + Metadata: i.Metadata, + BigDataNames: copyStringSlice(i.BigDataNames), + BigDataSizes: copyStringInt64Map(i.BigDataSizes), + BigDataDigests: copyStringDigestMap(i.BigDataDigests), + Created: i.Created, + ReadOnly: i.ReadOnly, + Flags: copyStringInterfaceMap(i.Flags), + } +} + +func copyImageSlice(slice []*Image) []*Image { + if len(slice) > 0 { + cp := make([]*Image, len(slice)) + for i := range slice { + cp[i] = copyImage(slice[i]) + } + return cp + } + return nil +} + +func (r *imageStore) Images() ([]Image, error) { + images := make([]Image, len(r.images)) + for i := range r.images { + images[i] = *copyImage(r.images[i]) + } + return images, nil +} + +func (r *imageStore) imagespath() string { + return filepath.Join(r.dir, "images.json") +} + +func (r *imageStore) datadir(id string) string { + return filepath.Join(r.dir, id) +} + +func (r *imageStore) datapath(id, key string) string { + return filepath.Join(r.datadir(id), makeBigDataBaseName(key)) +} + +// bigDataNameIsManifest determines if a big data item with the specified name +// is considered to be representative of the image, in that its digest can be +// said to also be the image's digest. Currently, if its name is, or begins +// with, "manifest", we say that it is. +func bigDataNameIsManifest(name string) bool { + return strings.HasPrefix(name, ImageDigestManifestBigDataNamePrefix) +} + +// recomputeDigests takes a fixed digest and a name-to-digest map and builds a +// list of the unique values that would identify the image. +func (image *Image) recomputeDigests() error { + validDigests := make([]digest.Digest, 0, len(image.BigDataDigests)+1) + digests := make(map[digest.Digest]struct{}) + if image.Digest != "" { + if err := image.Digest.Validate(); err != nil { + return errors.Wrapf(err, "error validating image digest %q", string(image.Digest)) + } + digests[image.Digest] = struct{}{} + validDigests = append(validDigests, image.Digest) + } + for name, digest := range image.BigDataDigests { + if !bigDataNameIsManifest(name) { + continue + } + if digest.Validate() != nil { + return errors.Wrapf(digest.Validate(), "error validating digest %q for big data item %q", string(digest), name) + } + // Deduplicate the digest values. 
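+		// (several big data items can carry the same digest, and a
+		// manually-set image.Digest may duplicate one of them)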
+ if _, known := digests[digest]; !known { + digests[digest] = struct{}{} + validDigests = append(validDigests, digest) + } + } + if image.Digest == "" && len(validDigests) > 0 { + image.Digest = validDigests[0] + } + image.Digests = validDigests + return nil +} + +func (r *imageStore) Load() error { + shouldSave := false + rpath := r.imagespath() + data, err := ioutil.ReadFile(rpath) + if err != nil && !os.IsNotExist(err) { + return err + } + images := []*Image{} + idlist := []string{} + ids := make(map[string]*Image) + names := make(map[string]*Image) + digests := make(map[digest.Digest][]*Image) + if err = json.Unmarshal(data, &images); len(data) == 0 || err == nil { + idlist = make([]string, 0, len(images)) + for n, image := range images { + ids[image.ID] = images[n] + idlist = append(idlist, image.ID) + for _, name := range image.Names { + if conflict, ok := names[name]; ok { + r.removeName(conflict, name) + shouldSave = true + } + } + // Compute the digest list. + err = image.recomputeDigests() + if err != nil { + return errors.Wrapf(err, "error computing digests for image with ID %q (%v)", image.ID, image.Names) + } + for _, name := range image.Names { + names[name] = image + } + for _, digest := range image.Digests { + list := digests[digest] + digests[digest] = append(list, image) + } + image.ReadOnly = !r.IsReadWrite() + } + } + if shouldSave && (!r.IsReadWrite() || !r.Locked()) { + return ErrDuplicateImageNames + } + r.images = images + r.idindex = truncindex.NewTruncIndex(idlist) + r.byid = ids + r.byname = names + r.bydigest = digests + if shouldSave { + return r.Save() + } + return nil +} + +func (r *imageStore) Save() error { + if !r.IsReadWrite() { + return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify the image store at %q", r.imagespath()) + } + if !r.Locked() { + return errors.New("image store is not locked for writing") + } + rpath := r.imagespath() + if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil { + return err + } + jdata, err := json.Marshal(&r.images) + if err != nil { + return err + } + defer r.Touch() + return ioutils.AtomicWriteFile(rpath, jdata, 0600) +} + +func newImageStore(dir string) (ImageStore, error) { + if err := os.MkdirAll(dir, 0700); err != nil { + return nil, err + } + lockfile, err := GetLockfile(filepath.Join(dir, "images.lock")) + if err != nil { + return nil, err + } + lockfile.Lock() + defer lockfile.Unlock() + istore := imageStore{ + lockfile: lockfile, + dir: dir, + images: []*Image{}, + byid: make(map[string]*Image), + byname: make(map[string]*Image), + bydigest: make(map[digest.Digest][]*Image), + } + if err := istore.Load(); err != nil { + return nil, err + } + return &istore, nil +} + +func newROImageStore(dir string) (ROImageStore, error) { + lockfile, err := GetROLockfile(filepath.Join(dir, "images.lock")) + if err != nil { + return nil, err + } + lockfile.Lock() + defer lockfile.Unlock() + istore := imageStore{ + lockfile: lockfile, + dir: dir, + images: []*Image{}, + byid: make(map[string]*Image), + byname: make(map[string]*Image), + bydigest: make(map[digest.Digest][]*Image), + } + if err := istore.Load(); err != nil { + return nil, err + } + return &istore, nil +} + +func (r *imageStore) lookup(id string) (*Image, bool) { + if image, ok := r.byid[id]; ok { + return image, ok + } else if image, ok := r.byname[id]; ok { + return image, ok + } else if longid, err := r.idindex.Get(id); err == nil { + image, ok := r.byid[longid] + return image, ok + } + return nil, false +} + +func (r *imageStore) ClearFlag(id 
string, flag string) error { + if !r.IsReadWrite() { + return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to clear flags on images at %q", r.imagespath()) + } + image, ok := r.lookup(id) + if !ok { + return ErrImageUnknown + } + delete(image.Flags, flag) + return r.Save() +} + +func (r *imageStore) SetFlag(id string, flag string, value interface{}) error { + if !r.IsReadWrite() { + return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to set flags on images at %q", r.imagespath()) + } + image, ok := r.lookup(id) + if !ok { + return ErrImageUnknown + } + if image.Flags == nil { + image.Flags = make(map[string]interface{}) + } + image.Flags[flag] = value + return r.Save() +} + +func (r *imageStore) Create(id string, names []string, layer, metadata string, created time.Time, searchableDigest digest.Digest) (image *Image, err error) { + if !r.IsReadWrite() { + return nil, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to create new images at %q", r.imagespath()) + } + if id == "" { + id = stringid.GenerateRandomID() + _, idInUse := r.byid[id] + for idInUse { + id = stringid.GenerateRandomID() + _, idInUse = r.byid[id] + } + } + if _, idInUse := r.byid[id]; idInUse { + return nil, errors.Wrapf(ErrDuplicateID, "an image with ID %q already exists", id) + } + names = dedupeNames(names) + for _, name := range names { + if image, nameInUse := r.byname[name]; nameInUse { + return nil, errors.Wrapf(ErrDuplicateName, "image name %q is already associated with image %q", name, image.ID) + } + } + if created.IsZero() { + created = time.Now().UTC() + } + if err == nil { + image = &Image{ + ID: id, + Digest: searchableDigest, + Digests: nil, + Names: names, + TopLayer: layer, + Metadata: metadata, + BigDataNames: []string{}, + BigDataSizes: make(map[string]int64), + BigDataDigests: make(map[string]digest.Digest), + Created: created, + Flags: make(map[string]interface{}), + } + err := image.recomputeDigests() + if err != nil { + return nil, errors.Wrapf(err, "error validating digests for new image") + } + r.images = append(r.images, image) + r.idindex.Add(id) + r.byid[id] = image + for _, name := range names { + r.byname[name] = image + } + for _, digest := range image.Digests { + list := r.bydigest[digest] + r.bydigest[digest] = append(list, image) + } + err = r.Save() + image = copyImage(image) + } + return image, err +} + +func (r *imageStore) addMappedTopLayer(id, layer string) error { + if image, ok := r.lookup(id); ok { + image.MappedTopLayers = append(image.MappedTopLayers, layer) + return r.Save() + } + return ErrImageUnknown +} + +func (r *imageStore) Metadata(id string) (string, error) { + if image, ok := r.lookup(id); ok { + return image.Metadata, nil + } + return "", ErrImageUnknown +} + +func (r *imageStore) SetMetadata(id, metadata string) error { + if !r.IsReadWrite() { + return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify image metadata at %q", r.imagespath()) + } + if image, ok := r.lookup(id); ok { + image.Metadata = metadata + return r.Save() + } + return ErrImageUnknown +} + +func (r *imageStore) removeName(image *Image, name string) { + image.Names = stringSliceWithoutValue(image.Names, name) +} + +func (r *imageStore) SetNames(id string, names []string) error { + if !r.IsReadWrite() { + return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to change image name assignments at %q", r.imagespath()) + } + names = dedupeNames(names) + if image, ok := r.lookup(id); ok { + for _, name := range image.Names { + delete(r.byname, name) + } + for _, name := range names { + if otherImage, 
ok := r.byname[name]; ok { + r.removeName(otherImage, name) + } + r.byname[name] = image + } + image.Names = names + return r.Save() + } + return ErrImageUnknown +} + +func (r *imageStore) Delete(id string) error { + if !r.IsReadWrite() { + return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete images at %q", r.imagespath()) + } + image, ok := r.lookup(id) + if !ok { + return ErrImageUnknown + } + id = image.ID + toDeleteIndex := -1 + for i, candidate := range r.images { + if candidate.ID == id { + toDeleteIndex = i + } + } + delete(r.byid, id) + r.idindex.Delete(id) + for _, name := range image.Names { + delete(r.byname, name) + } + for _, digest := range image.Digests { + prunedList := imageSliceWithoutValue(r.bydigest[digest], image) + if len(prunedList) == 0 { + delete(r.bydigest, digest) + } else { + r.bydigest[digest] = prunedList + } + } + if toDeleteIndex != -1 { + // delete the image at toDeleteIndex + if toDeleteIndex == len(r.images)-1 { + r.images = r.images[:len(r.images)-1] + } else { + r.images = append(r.images[:toDeleteIndex], r.images[toDeleteIndex+1:]...) + } + } + if err := r.Save(); err != nil { + return err + } + if err := os.RemoveAll(r.datadir(id)); err != nil { + return err + } + return nil +} + +func (r *imageStore) Get(id string) (*Image, error) { + if image, ok := r.lookup(id); ok { + return copyImage(image), nil + } + return nil, ErrImageUnknown +} + +func (r *imageStore) Lookup(name string) (id string, err error) { + if image, ok := r.lookup(name); ok { + return image.ID, nil + } + return "", ErrImageUnknown +} + +func (r *imageStore) Exists(id string) bool { + _, ok := r.lookup(id) + return ok +} + +func (r *imageStore) ByDigest(d digest.Digest) ([]*Image, error) { + if images, ok := r.bydigest[d]; ok { + return copyImageSlice(images), nil + } + return nil, ErrImageUnknown +} + +func (r *imageStore) BigData(id, key string) ([]byte, error) { + if key == "" { + return nil, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve image big data value for empty name") + } + image, ok := r.lookup(id) + if !ok { + return nil, ErrImageUnknown + } + return ioutil.ReadFile(r.datapath(image.ID, key)) +} + +func (r *imageStore) BigDataSize(id, key string) (int64, error) { + if key == "" { + return -1, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve size of image big data with empty name") + } + image, ok := r.lookup(id) + if !ok { + return -1, ErrImageUnknown + } + if image.BigDataSizes == nil { + image.BigDataSizes = make(map[string]int64) + } + if size, ok := image.BigDataSizes[key]; ok { + return size, nil + } + if data, err := r.BigData(id, key); err == nil && data != nil { + return int64(len(data)), nil + } + return -1, ErrSizeUnknown +} + +func (r *imageStore) BigDataDigest(id, key string) (digest.Digest, error) { + if key == "" { + return "", errors.Wrapf(ErrInvalidBigDataName, "can't retrieve digest of image big data value with empty name") + } + image, ok := r.lookup(id) + if !ok { + return "", ErrImageUnknown + } + if image.BigDataDigests == nil { + image.BigDataDigests = make(map[string]digest.Digest) + } + if d, ok := image.BigDataDigests[key]; ok { + return d, nil + } + return "", ErrDigestUnknown +} + +func (r *imageStore) BigDataNames(id string) ([]string, error) { + image, ok := r.lookup(id) + if !ok { + return nil, ErrImageUnknown + } + return copyStringSlice(image.BigDataNames), nil +} + +func imageSliceWithoutValue(slice []*Image, value *Image) []*Image { + modified := make([]*Image, 0, len(slice)) + for _, v := range slice { + if v == value { 
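+			// match by pointer identity, not by comparing image contents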
+			continue
+		}
+		modified = append(modified, v)
+	}
+	return modified
+}
+
+func (r *imageStore) SetBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error {
+	if key == "" {
+		return errors.Wrapf(ErrInvalidBigDataName, "can't set empty name for image big data item")
+	}
+	if !r.IsReadWrite() {
+		return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to save data items associated with images at %q", r.imagespath())
+	}
+	image, ok := r.lookup(id)
+	if !ok {
+		return ErrImageUnknown
+	}
+	err := os.MkdirAll(r.datadir(image.ID), 0700)
+	if err != nil {
+		return err
+	}
+	var newDigest digest.Digest
+	if bigDataNameIsManifest(key) {
+		if digestManifest == nil {
+			return errors.Wrapf(ErrDigestUnknown, "error digesting manifest: no manifest digest callback provided")
+		}
+		if newDigest, err = digestManifest(data); err != nil {
+			return errors.Wrapf(err, "error digesting manifest")
+		}
+	} else {
+		newDigest = digest.Canonical.FromBytes(data)
+	}
+	err = ioutils.AtomicWriteFile(r.datapath(image.ID, key), data, 0600)
+	if err == nil {
+		save := false
+		if image.BigDataSizes == nil {
+			image.BigDataSizes = make(map[string]int64)
+		}
+		oldSize, sizeOk := image.BigDataSizes[key]
+		image.BigDataSizes[key] = int64(len(data))
+		if image.BigDataDigests == nil {
+			image.BigDataDigests = make(map[string]digest.Digest)
+		}
+		oldDigest, digestOk := image.BigDataDigests[key]
+		image.BigDataDigests[key] = newDigest
+		if !sizeOk || oldSize != image.BigDataSizes[key] || !digestOk || oldDigest != newDigest {
+			save = true
+		}
+		addName := true
+		for _, name := range image.BigDataNames {
+			if name == key {
+				addName = false
+				break
+			}
+		}
+		if addName {
+			image.BigDataNames = append(image.BigDataNames, key)
+			save = true
+		}
+		for _, oldDigest := range image.Digests {
+			// remove the image from the list of images in the digest-based index
+			if list, ok := r.bydigest[oldDigest]; ok {
+				prunedList := imageSliceWithoutValue(list, image)
+				if len(prunedList) == 0 {
+					delete(r.bydigest, oldDigest)
+				} else {
+					r.bydigest[oldDigest] = prunedList
+				}
+			}
+		}
+		if err = image.recomputeDigests(); err != nil {
+			return errors.Wrapf(err, "error recomputing image digest information for %s", image.ID)
+		}
+		for _, newDigest := range image.Digests {
+			// add the image to the list of images in the digest-based index which
+			// corresponds to the new digest for this item, unless it's already there
+			list := r.bydigest[newDigest]
+			if len(list) == len(imageSliceWithoutValue(list, image)) {
+				// the list isn't shortened by trying to prune this image from it,
+				// so it's not in there yet
+				r.bydigest[newDigest] = append(list, image)
+			}
+		}
+		if save {
+			err = r.Save()
+		}
+	}
+	return err
+}
+
+func (r *imageStore) Wipe() error {
+	if !r.IsReadWrite() {
+		return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete images at %q", r.imagespath())
+	}
+	ids := make([]string, 0, len(r.byid))
+	for id := range r.byid {
+		ids = append(ids, id)
+	}
+	for _, id := range ids {
+		if err := r.Delete(id); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (r *imageStore) Lock() {
+	r.lockfile.Lock()
+}
+
+func (r *imageStore) RecursiveLock() {
+	r.lockfile.RecursiveLock()
+}
+
+func (r *imageStore) RLock() {
+	r.lockfile.RLock()
+}
+
+func (r *imageStore) Unlock() {
+	r.lockfile.Unlock()
+}
+
+func (r *imageStore) Touch() error {
+	return r.lockfile.Touch()
+}
+
+func (r *imageStore) Modified() (bool, error) {
+	return r.lockfile.Modified()
+}
+
+func (r *imageStore) IsReadWrite() bool
{ + return r.lockfile.IsReadWrite() +} + +func (r *imageStore) TouchedSince(when time.Time) bool { + return r.lockfile.TouchedSince(when) +} + +func (r *imageStore) Locked() bool { + return r.lockfile.Locked() +} diff --git a/vendor/github.com/containers/storage/images_ffjson.go b/vendor/github.com/containers/storage/images_ffjson.go new file mode 100644 index 0000000..6b40ebd --- /dev/null +++ b/vendor/github.com/containers/storage/images_ffjson.go @@ -0,0 +1,1310 @@ +// Code generated by ffjson . DO NOT EDIT. +// source: images.go + +package storage + +import ( + "bytes" + "encoding/json" + "fmt" + "github.com/opencontainers/go-digest" + fflib "github.com/pquerna/ffjson/fflib/v1" +) + +// MarshalJSON marshal bytes to json - template +func (j *Image) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if j == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := j.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// MarshalJSONBuf marshal buff to json - template +func (j *Image) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if j == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ "id":`) + fflib.WriteJsonString(buf, string(j.ID)) + buf.WriteByte(',') + if len(j.Digest) != 0 { + buf.WriteString(`"digest":`) + fflib.WriteJsonString(buf, string(j.Digest)) + buf.WriteByte(',') + } + if len(j.Names) != 0 { + buf.WriteString(`"names":`) + if j.Names != nil { + buf.WriteString(`[`) + for i, v := range j.Names { + if i != 0 { + buf.WriteString(`,`) + } + fflib.WriteJsonString(buf, string(v)) + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte(',') + } + if len(j.TopLayer) != 0 { + buf.WriteString(`"layer":`) + fflib.WriteJsonString(buf, string(j.TopLayer)) + buf.WriteByte(',') + } + if len(j.MappedTopLayers) != 0 { + buf.WriteString(`"mapped-layers":`) + if j.MappedTopLayers != nil { + buf.WriteString(`[`) + for i, v := range j.MappedTopLayers { + if i != 0 { + buf.WriteString(`,`) + } + fflib.WriteJsonString(buf, string(v)) + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte(',') + } + if len(j.Metadata) != 0 { + buf.WriteString(`"metadata":`) + fflib.WriteJsonString(buf, string(j.Metadata)) + buf.WriteByte(',') + } + if len(j.BigDataNames) != 0 { + buf.WriteString(`"big-data-names":`) + if j.BigDataNames != nil { + buf.WriteString(`[`) + for i, v := range j.BigDataNames { + if i != 0 { + buf.WriteString(`,`) + } + fflib.WriteJsonString(buf, string(v)) + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte(',') + } + if len(j.BigDataSizes) != 0 { + if j.BigDataSizes == nil { + buf.WriteString(`"big-data-sizes":null`) + } else { + buf.WriteString(`"big-data-sizes":{ `) + for key, value := range j.BigDataSizes { + fflib.WriteJsonString(buf, key) + buf.WriteString(`:`) + fflib.FormatBits2(buf, uint64(value), 10, value < 0) + buf.WriteByte(',') + } + buf.Rewind(1) + buf.WriteByte('}') + } + buf.WriteByte(',') + } + if len(j.BigDataDigests) != 0 { + if j.BigDataDigests == nil { + buf.WriteString(`"big-data-digests":null`) + } else { + buf.WriteString(`"big-data-digests":{ `) + for key, value := range j.BigDataDigests { + fflib.WriteJsonString(buf, key) + buf.WriteString(`:`) + fflib.WriteJsonString(buf, string(value)) + buf.WriteByte(',') + } + buf.Rewind(1) + buf.WriteByte('}') + } + buf.WriteByte(',') + } + if true { + buf.WriteString(`"created":`) + + { + + obj, 
err = j.Created.MarshalJSON() + if err != nil { + return err + } + buf.Write(obj) + + } + buf.WriteByte(',') + } + if len(j.Flags) != 0 { + buf.WriteString(`"flags":`) + /* Falling back. type=map[string]interface {} kind=map */ + err = buf.Encode(j.Flags) + if err != nil { + return err + } + buf.WriteByte(',') + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffjtImagebase = iota + ffjtImagenosuchkey + + ffjtImageID + + ffjtImageDigest + + ffjtImageNames + + ffjtImageTopLayer + + ffjtImageMappedTopLayers + + ffjtImageMetadata + + ffjtImageBigDataNames + + ffjtImageBigDataSizes + + ffjtImageBigDataDigests + + ffjtImageCreated + + ffjtImageFlags +) + +var ffjKeyImageID = []byte("id") + +var ffjKeyImageDigest = []byte("digest") + +var ffjKeyImageNames = []byte("names") + +var ffjKeyImageTopLayer = []byte("layer") + +var ffjKeyImageMappedTopLayers = []byte("mapped-layers") + +var ffjKeyImageMetadata = []byte("metadata") + +var ffjKeyImageBigDataNames = []byte("big-data-names") + +var ffjKeyImageBigDataSizes = []byte("big-data-sizes") + +var ffjKeyImageBigDataDigests = []byte("big-data-digests") + +var ffjKeyImageCreated = []byte("created") + +var ffjKeyImageFlags = []byte("flags") + +// UnmarshalJSON umarshall json - template of ffjson +func (j *Image) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +// UnmarshalJSONFFLexer fast json unmarshall - template ffjson +func (j *Image) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error + currentKey := ffjtImagebase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffjtImagenosuchkey + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'b': + + if bytes.Equal(ffjKeyImageBigDataNames, kn) { + currentKey = ffjtImageBigDataNames + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffjKeyImageBigDataSizes, kn) { + currentKey = ffjtImageBigDataSizes + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffjKeyImageBigDataDigests, kn) { + currentKey = ffjtImageBigDataDigests + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'c': + + if bytes.Equal(ffjKeyImageCreated, kn) { + currentKey = ffjtImageCreated + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'd': + + if bytes.Equal(ffjKeyImageDigest, kn) { + currentKey = ffjtImageDigest + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'f': + + if bytes.Equal(ffjKeyImageFlags, kn) { + currentKey = ffjtImageFlags + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'i': + + if bytes.Equal(ffjKeyImageID, kn) { + currentKey = ffjtImageID + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'l': + + if bytes.Equal(ffjKeyImageTopLayer, kn) { + currentKey = ffjtImageTopLayer + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'm': + + if bytes.Equal(ffjKeyImageMappedTopLayers, kn) { + currentKey = ffjtImageMappedTopLayers + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffjKeyImageMetadata, kn) { + currentKey = ffjtImageMetadata + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'n': + + if bytes.Equal(ffjKeyImageNames, kn) { + currentKey = ffjtImageNames + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffjKeyImageFlags, kn) { + currentKey = ffjtImageFlags + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffjKeyImageCreated, kn) { + currentKey = ffjtImageCreated + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffjKeyImageBigDataDigests, kn) { + currentKey = ffjtImageBigDataDigests + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffjKeyImageBigDataSizes, kn) { + currentKey = ffjtImageBigDataSizes + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffjKeyImageBigDataNames, kn) { + currentKey = ffjtImageBigDataNames + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffjKeyImageMetadata, kn) { + currentKey = ffjtImageMetadata + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffjKeyImageMappedTopLayers, kn) { + currentKey = ffjtImageMappedTopLayers + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffjKeyImageTopLayer, kn) { + currentKey = ffjtImageTopLayer + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffjKeyImageNames, kn) { + currentKey = ffjtImageNames + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffjKeyImageDigest, kn) { + currentKey = ffjtImageDigest + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffjKeyImageID, kn) { + currentKey = ffjtImageID + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffjtImagenosuchkey + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + 
goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffjtImageID: + goto handle_ID + + case ffjtImageDigest: + goto handle_Digest + + case ffjtImageNames: + goto handle_Names + + case ffjtImageTopLayer: + goto handle_TopLayer + + case ffjtImageMappedTopLayers: + goto handle_MappedTopLayers + + case ffjtImageMetadata: + goto handle_Metadata + + case ffjtImageBigDataNames: + goto handle_BigDataNames + + case ffjtImageBigDataSizes: + goto handle_BigDataSizes + + case ffjtImageBigDataDigests: + goto handle_BigDataDigests + + case ffjtImageCreated: + goto handle_Created + + case ffjtImageFlags: + goto handle_Flags + + case ffjtImagenosuchkey: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_ID: + + /* handler: j.ID type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + j.ID = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Digest: + + /* handler: j.Digest type=digest.Digest kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for Digest", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + j.Digest = digest.Digest(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Names: + + /* handler: j.Names type=[]string kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + j.Names = nil + } else { + + j.Names = []string{} + + wantVal := true + + for { + + var tmpJNames string + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmpJNames type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + tmpJNames = string(string(outBuf)) + + } + } + + j.Names = append(j.Names, tmpJNames) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_TopLayer: + + /* handler: j.TopLayer type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + j.TopLayer = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_MappedTopLayers: + + /* handler: j.MappedTopLayers type=[]string kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + j.MappedTopLayers = nil + } else { + + j.MappedTopLayers = []string{} + + wantVal := true + + for { + + var tmpJMappedTopLayers string + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmpJMappedTopLayers type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + tmpJMappedTopLayers = string(string(outBuf)) + + } + } + + j.MappedTopLayers = append(j.MappedTopLayers, tmpJMappedTopLayers) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Metadata: + + /* handler: j.Metadata type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + j.Metadata = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_BigDataNames: + + /* handler: j.BigDataNames type=[]string kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + j.BigDataNames = nil + } else { + + j.BigDataNames = []string{} + + wantVal := true + + for { + + var tmpJBigDataNames string + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as 
an array value. + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmpJBigDataNames type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + tmpJBigDataNames = string(string(outBuf)) + + } + } + + j.BigDataNames = append(j.BigDataNames, tmpJBigDataNames) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_BigDataSizes: + + /* handler: j.BigDataSizes type=map[string]int64 kind=map quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_bracket && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + j.BigDataSizes = nil + } else { + + j.BigDataSizes = make(map[string]int64, 0) + + wantVal := true + + for { + + var k string + + var tmpJBigDataSizes int64 + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_bracket { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: k type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + k = string(string(outBuf)) + + } + } + + // Expect ':' after key + tok = fs.Scan() + if tok != fflib.FFTok_colon { + return fs.WrapErr(fmt.Errorf("wanted colon token, but got token: %v", tok)) + } + + tok = fs.Scan() + /* handler: tmpJBigDataSizes type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + tmpJBigDataSizes = int64(tval) + + } + } + + j.BigDataSizes[k] = tmpJBigDataSizes + + wantVal = false + } + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_BigDataDigests: + + /* handler: j.BigDataDigests type=map[string]digest.Digest kind=map quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_bracket && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + j.BigDataDigests = nil + } else { + + j.BigDataDigests = make(map[string]digest.Digest, 0) + + wantVal := true + + for { + + var k string + + var tmpJBigDataDigests digest.Digest + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_bracket { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: k type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + k = string(string(outBuf)) + + } + } + + // Expect ':' after key + tok = fs.Scan() + if tok != fflib.FFTok_colon { + return fs.WrapErr(fmt.Errorf("wanted colon token, but got token: %v", tok)) + } + + tok = fs.Scan() + /* handler: tmpJBigDataDigests type=digest.Digest kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for Digest", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + tmpJBigDataDigests = digest.Digest(string(outBuf)) + + } + } + + j.BigDataDigests[k] = tmpJBigDataDigests + + wantVal = false + } + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Created: + + /* handler: j.Created type=time.Time kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + } else { + + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = j.Created.UnmarshalJSON(tbuf) + if err != nil { + return fs.WrapErr(err) + } + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Flags: + + /* handler: j.Flags type=map[string]interface {} kind=map quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_bracket && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + j.Flags = nil + } else { + + j.Flags = make(map[string]interface{}, 0) + + wantVal := true + + for { + + var k string + + var tmpJFlags interface{} + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_bracket { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: k type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + k = string(string(outBuf)) + + } + } + + // Expect ':' after key + tok = fs.Scan() + if tok != fflib.FFTok_colon { + return fs.WrapErr(fmt.Errorf("wanted colon token, but got token: %v", tok)) + } + + tok = fs.Scan() + /* handler: tmpJFlags type=interface {} kind=interface quoted=false*/ + + { + /* Falling back. 
type=interface {} kind=interface */ + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = json.Unmarshal(tbuf, &tmpJFlags) + if err != nil { + return fs.WrapErr(err) + } + } + + j.Flags[k] = tmpJFlags + + wantVal = false + } + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +// MarshalJSON marshal bytes to json - template +func (j *imageStore) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if j == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := j.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// MarshalJSONBuf marshal buff to json - template +func (j *imageStore) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if j == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{}`) + return nil +} + +const ( + ffjtimageStorebase = iota + ffjtimageStorenosuchkey +) + +// UnmarshalJSON umarshall json - template of ffjson +func (j *imageStore) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +// UnmarshalJSONFFLexer fast json unmarshall - template ffjson +func (j *imageStore) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error + currentKey := ffjtimageStorebase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffjtimageStorenosuchkey + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + } + + currentKey = ffjtimageStorenosuchkey + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffjtimageStorenosuchkey: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} diff --git a/vendor/github.com/containers/storage/layers.go b/vendor/github.com/containers/storage/layers.go new file mode 100644 index 0000000..fb79238 --- /dev/null +++ b/vendor/github.com/containers/storage/layers.go @@ -0,0 +1,1355 @@ +package storage + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "sort" + "time" + + drivers "github.com/containers/storage/drivers" + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/ioutils" + "github.com/containers/storage/pkg/stringid" + "github.com/containers/storage/pkg/system" + "github.com/containers/storage/pkg/truncindex" + "github.com/klauspost/pgzip" + digest "github.com/opencontainers/go-digest" + "github.com/opencontainers/selinux/go-selinux/label" + "github.com/pkg/errors" + "github.com/vbatts/tar-split/tar/asm" + "github.com/vbatts/tar-split/tar/storage" +) + +const ( + tarSplitSuffix = ".tar-split.gz" + incompleteFlag = "incomplete" +) + +// A Layer is a record of a copy-on-write layer that's stored by the lower +// level graph driver. +type Layer struct { + // ID is either one which was specified at create-time, or a random + // value which was generated by the library. + ID string `json:"id"` + + // Names is an optional set of user-defined convenience values. The + // layer can be referred to by its ID or any of its names. Names are + // unique among layers. + Names []string `json:"names,omitempty"` + + // Parent is the ID of a layer from which this layer inherits data. + Parent string `json:"parent,omitempty"` + + // Metadata is data we keep for the convenience of the caller. It is not + // expected to be large, since it is kept in memory. + Metadata string `json:"metadata,omitempty"` + + // MountLabel is an SELinux label which should be used when attempting to mount + // the layer. + MountLabel string `json:"mountlabel,omitempty"` + + // MountPoint is the path where the layer is mounted, or where it was most + // recently mounted. 
This can change between subsequent Unmount() and
+ // Mount() calls, so the caller should consult this value after Mount()
+ // succeeds to find the location of the container's root filesystem.
+ MountPoint string `json:"-"`
+
+ // MountCount is used as a reference count for the container's layer being
+ // mounted at the mount point.
+ MountCount int `json:"-"`
+
+ // Created is the datestamp for when this layer was created. Older
+ // versions of the library did not track this information, so callers
+ // will likely want to use the IsZero() method to verify that a value
+ // is set before using it.
+ Created time.Time `json:"created,omitempty"`
+
+ // CompressedDigest is the digest of the blob that was last passed to
+ // ApplyDiff() or Put(), as it was presented to us.
+ CompressedDigest digest.Digest `json:"compressed-diff-digest,omitempty"`
+
+ // CompressedSize is the length of the blob that was last passed to
+ // ApplyDiff() or Put(), as it was presented to us. If
+ // CompressedDigest is not set, this should be treated as if it were an
+ // uninitialized value.
+ CompressedSize int64 `json:"compressed-size,omitempty"`
+
+ // UncompressedDigest is the digest of the blob that was last passed to
+ // ApplyDiff() or Put(), after we decompressed it. Often referred to
+ // as a DiffID.
+ UncompressedDigest digest.Digest `json:"diff-digest,omitempty"`
+
+ // UncompressedSize is the length of the blob that was last passed to
+ // ApplyDiff() or Put(), after we decompressed it. If
+ // UncompressedDigest is not set, this should be treated as if it were
+ // an uninitialized value.
+ UncompressedSize int64 `json:"diff-size,omitempty"`
+
+ // CompressionType is the type of compression which we detected on the blob
+ // that was last passed to ApplyDiff() or Put().
+ CompressionType archive.Compression `json:"compression,omitempty"`
+
+ // Flags is arbitrary data about the layer.
+ Flags map[string]interface{} `json:"flags,omitempty"`
+
+ // UIDMap and GIDMap are used for setting up a layer's contents
+ // for use inside of a user namespace where UID mapping is being used.
+ UIDMap []idtools.IDMap `json:"uidmap,omitempty"`
+ GIDMap []idtools.IDMap `json:"gidmap,omitempty"`
+
+ // ReadOnly is true if this layer resides in a read-only layer store.
+ ReadOnly bool `json:"-"`
+}
+
+type layerMountPoint struct {
+ ID string `json:"id"`
+ MountPoint string `json:"path"`
+ MountCount int `json:"count"`
+}
+
+// DiffOptions override the default behavior of Diff() methods.
+type DiffOptions struct {
+ // Compression, if set, overrides the default compressor when generating a diff.
+ Compression *archive.Compression
+}
+
+// ROLayerStore wraps a graph driver, adding the ability to refer to layers by
+// name, and keeping track of parent-child relationships, along with a list of
+// all known layers.
+type ROLayerStore interface {
+ ROFileBasedStore
+ ROMetadataStore
+
+ // Exists checks if a layer with the specified name or ID is known.
+ Exists(id string) bool
+
+ // Get retrieves information about a layer given an ID or name.
+ Get(id string) (*Layer, error)
+
+ // Status returns a slice of key-value pairs, suitable for human consumption,
+ // relaying whatever status information the underlying driver can share.
+ Status() ([][2]string, error)
+
+ // Changes returns a slice of Change structures, which contain a pathname
+ // (Path) and a description of what sort of change (Kind) was made by the
+ // layer (either ChangeModify, ChangeAdd, or ChangeDelete), relative to a
+ // specified layer. 
By default, the layer's parent is used as a reference. + Changes(from, to string) ([]archive.Change, error) + + // Diff produces a tarstream which can be applied to a layer with the contents + // of the first layer to produce a layer with the contents of the second layer. + // By default, the parent of the second layer is used as the first + // layer, so it need not be specified. Options can be used to override + // default behavior, but are also not required. + Diff(from, to string, options *DiffOptions) (io.ReadCloser, error) + + // DiffSize produces an estimate of the length of the tarstream which would be + // produced by Diff. + DiffSize(from, to string) (int64, error) + + // Size produces a cached value for the uncompressed size of the layer, + // if one is known, or -1 if it is not known. If the layer can not be + // found, it returns an error. + Size(name string) (int64, error) + + // Lookup attempts to translate a name to an ID. Most methods do this + // implicitly. + Lookup(name string) (string, error) + + // LayersByCompressedDigest returns a slice of the layers with the + // specified compressed digest value recorded for them. + LayersByCompressedDigest(d digest.Digest) ([]Layer, error) + + // LayersByUncompressedDigest returns a slice of the layers with the + // specified uncompressed digest value recorded for them. + LayersByUncompressedDigest(d digest.Digest) ([]Layer, error) + + // Layers returns a slice of the known layers. + Layers() ([]Layer, error) +} + +// LayerStore wraps a graph driver, adding the ability to refer to layers by +// name, and keeping track of parent-child relationships, along with a list of +// all known layers. +type LayerStore interface { + ROLayerStore + RWFileBasedStore + RWMetadataStore + FlaggableStore + + // Create creates a new layer, optionally giving it a specified ID rather than + // a randomly-generated one, either inheriting data from another specified + // layer or the empty base layer. The new layer can optionally be given names + // and have an SELinux label specified for use when mounting it. Some + // underlying drivers can accept a "size" option. At this time, most + // underlying drivers do not themselves distinguish between writeable + // and read-only layers. + Create(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool) (*Layer, error) + + // CreateWithFlags combines the functions of Create and SetFlag. + CreateWithFlags(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}) (layer *Layer, err error) + + // Put combines the functions of CreateWithFlags and ApplyDiff. + Put(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}, diff io.Reader) (*Layer, int64, error) + + // SetNames replaces the list of names associated with a layer with the + // supplied values. + SetNames(id string, names []string) error + + // Delete deletes a layer with the specified name or ID. + Delete(id string) error + + // Wipe deletes all layers. + Wipe() error + + // Mount mounts a layer for use. If the specified layer is the parent of other + // layers, it should not be written to. An SELinux label to be applied to the + // mount can be specified to override the one configured for the layer. + // The mappings used by the container can be specified. 
+ Mount(id string, options drivers.MountOpts) (string, error) + + // Unmount unmounts a layer when it is no longer in use. + Unmount(id string, force bool) (bool, error) + + // Mounted returns number of times the layer has been mounted. + Mounted(id string) (int, error) + + // ParentOwners returns the UIDs and GIDs of parents of the layer's mountpoint + // for which the layer's UID and GID maps don't contain corresponding entries. + ParentOwners(id string) (uids, gids []int, err error) + + // ApplyDiff reads a tarstream which was created by a previous call to Diff and + // applies its changes to a specified layer. + ApplyDiff(to string, diff io.Reader) (int64, error) +} + +type layerStore struct { + lockfile Locker + mountsLockfile Locker + rundir string + driver drivers.Driver + layerdir string + layers []*Layer + idindex *truncindex.TruncIndex + byid map[string]*Layer + byname map[string]*Layer + bymount map[string]*Layer + bycompressedsum map[digest.Digest][]string + byuncompressedsum map[digest.Digest][]string + uidMap []idtools.IDMap + gidMap []idtools.IDMap +} + +func copyLayer(l *Layer) *Layer { + return &Layer{ + ID: l.ID, + Names: copyStringSlice(l.Names), + Parent: l.Parent, + Metadata: l.Metadata, + MountLabel: l.MountLabel, + MountPoint: l.MountPoint, + MountCount: l.MountCount, + Created: l.Created, + CompressedDigest: l.CompressedDigest, + CompressedSize: l.CompressedSize, + UncompressedDigest: l.UncompressedDigest, + UncompressedSize: l.UncompressedSize, + CompressionType: l.CompressionType, + ReadOnly: l.ReadOnly, + Flags: copyStringInterfaceMap(l.Flags), + UIDMap: copyIDMap(l.UIDMap), + GIDMap: copyIDMap(l.GIDMap), + } +} + +func (r *layerStore) Layers() ([]Layer, error) { + layers := make([]Layer, len(r.layers)) + for i := range r.layers { + layers[i] = *copyLayer(r.layers[i]) + } + return layers, nil +} + +func (r *layerStore) mountspath() string { + return filepath.Join(r.rundir, "mountpoints.json") +} + +func (r *layerStore) layerspath() string { + return filepath.Join(r.layerdir, "layers.json") +} + +func (r *layerStore) Load() error { + shouldSave := false + rpath := r.layerspath() + data, err := ioutil.ReadFile(rpath) + if err != nil && !os.IsNotExist(err) { + return err + } + layers := []*Layer{} + idlist := []string{} + ids := make(map[string]*Layer) + names := make(map[string]*Layer) + compressedsums := make(map[digest.Digest][]string) + uncompressedsums := make(map[digest.Digest][]string) + if r.lockfile.IsReadWrite() { + label.ClearLabels() + } + if err = json.Unmarshal(data, &layers); len(data) == 0 || err == nil { + idlist = make([]string, 0, len(layers)) + for n, layer := range layers { + ids[layer.ID] = layers[n] + idlist = append(idlist, layer.ID) + for _, name := range layer.Names { + if conflict, ok := names[name]; ok { + r.removeName(conflict, name) + shouldSave = true + } + names[name] = layers[n] + } + if layer.CompressedDigest != "" { + compressedsums[layer.CompressedDigest] = append(compressedsums[layer.CompressedDigest], layer.ID) + } + if layer.UncompressedDigest != "" { + uncompressedsums[layer.UncompressedDigest] = append(uncompressedsums[layer.UncompressedDigest], layer.ID) + } + if layer.MountLabel != "" { + label.ReserveLabel(layer.MountLabel) + } + layer.ReadOnly = !r.IsReadWrite() + } + err = nil + } + if shouldSave && (!r.IsReadWrite() || !r.Locked()) { + return ErrDuplicateLayerNames + } + r.layers = layers + r.idindex = truncindex.NewTruncIndex(idlist) + r.byid = ids + r.byname = names + r.bycompressedsum = compressedsums + 
r.byuncompressedsum = uncompressedsums + // Load and merge information about which layers are mounted, and where. + if r.IsReadWrite() { + r.mountsLockfile.RLock() + defer r.mountsLockfile.Unlock() + if err = r.loadMounts(); err != nil { + return err + } + } + // Last step: if we're writable, try to remove anything that a previous + // user of this storage area marked for deletion but didn't manage to + // actually delete. + if r.IsReadWrite() && r.Locked() { + for _, layer := range r.layers { + if layer.Flags == nil { + layer.Flags = make(map[string]interface{}) + } + if cleanup, ok := layer.Flags[incompleteFlag]; ok { + if b, ok := cleanup.(bool); ok && b { + err = r.Delete(layer.ID) + if err != nil { + break + } + shouldSave = true + } + } + } + if shouldSave { + return r.Save() + } + } + return err +} + +func (r *layerStore) loadMounts() error { + mounts := make(map[string]*Layer) + mpath := r.mountspath() + data, err := ioutil.ReadFile(mpath) + if err != nil && !os.IsNotExist(err) { + return err + } + layerMounts := []layerMountPoint{} + if err = json.Unmarshal(data, &layerMounts); len(data) == 0 || err == nil { + for _, mount := range layerMounts { + if mount.MountPoint != "" { + if layer, ok := r.lookup(mount.ID); ok { + mounts[mount.MountPoint] = layer + layer.MountPoint = mount.MountPoint + layer.MountCount = mount.MountCount + } + } + } + err = nil + } + r.bymount = mounts + return err +} + +func (r *layerStore) Save() error { + if !r.IsReadWrite() { + return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify the layer store at %q", r.layerspath()) + } + if !r.Locked() { + return errors.New("layer store is not locked for writing") + } + rpath := r.layerspath() + if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil { + return err + } + jldata, err := json.Marshal(&r.layers) + if err != nil { + return err + } + defer r.Touch() + if err := ioutils.AtomicWriteFile(rpath, jldata, 0600); err != nil { + return err + } + r.mountsLockfile.Lock() + defer r.mountsLockfile.Unlock() + defer r.mountsLockfile.Touch() + return r.saveMounts() +} + +func (r *layerStore) saveMounts() error { + if !r.IsReadWrite() { + return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify the layer store at %q", r.layerspath()) + } + if !r.mountsLockfile.Locked() { + return errors.New("layer store mount information is not locked for writing") + } + mpath := r.mountspath() + if err := os.MkdirAll(filepath.Dir(mpath), 0700); err != nil { + return err + } + mounts := make([]layerMountPoint, 0, len(r.layers)) + for _, layer := range r.layers { + if layer.MountPoint != "" && layer.MountCount > 0 { + mounts = append(mounts, layerMountPoint{ + ID: layer.ID, + MountPoint: layer.MountPoint, + MountCount: layer.MountCount, + }) + } + } + jmdata, err := json.Marshal(&mounts) + if err != nil { + return err + } + if err = ioutils.AtomicWriteFile(mpath, jmdata, 0600); err != nil { + return err + } + return r.loadMounts() +} + +func newLayerStore(rundir string, layerdir string, driver drivers.Driver, uidMap, gidMap []idtools.IDMap) (LayerStore, error) { + if err := os.MkdirAll(rundir, 0700); err != nil { + return nil, err + } + if err := os.MkdirAll(layerdir, 0700); err != nil { + return nil, err + } + lockfile, err := GetLockfile(filepath.Join(layerdir, "layers.lock")) + if err != nil { + return nil, err + } + lockfile.Lock() + defer lockfile.Unlock() + mountsLockfile, err := GetLockfile(filepath.Join(rundir, "mountpoints.lock")) + if err != nil { + return nil, err + } + rlstore := layerStore{ + lockfile: 
lockfile, + mountsLockfile: mountsLockfile, + driver: driver, + rundir: rundir, + layerdir: layerdir, + byid: make(map[string]*Layer), + bymount: make(map[string]*Layer), + byname: make(map[string]*Layer), + uidMap: copyIDMap(uidMap), + gidMap: copyIDMap(gidMap), + } + if err := rlstore.Load(); err != nil { + return nil, err + } + return &rlstore, nil +} + +func newROLayerStore(rundir string, layerdir string, driver drivers.Driver) (ROLayerStore, error) { + lockfile, err := GetROLockfile(filepath.Join(layerdir, "layers.lock")) + if err != nil { + return nil, err + } + lockfile.Lock() + defer lockfile.Unlock() + rlstore := layerStore{ + lockfile: lockfile, + mountsLockfile: nil, + driver: driver, + rundir: rundir, + layerdir: layerdir, + byid: make(map[string]*Layer), + bymount: make(map[string]*Layer), + byname: make(map[string]*Layer), + } + if err := rlstore.Load(); err != nil { + return nil, err + } + return &rlstore, nil +} + +func (r *layerStore) lookup(id string) (*Layer, bool) { + if layer, ok := r.byid[id]; ok { + return layer, ok + } else if layer, ok := r.byname[id]; ok { + return layer, ok + } else if longid, err := r.idindex.Get(id); err == nil { + layer, ok := r.byid[longid] + return layer, ok + } + return nil, false +} + +func (r *layerStore) Size(name string) (int64, error) { + layer, ok := r.lookup(name) + if !ok { + return -1, ErrLayerUnknown + } + // We use the presence of a non-empty digest as an indicator that the size value was intentionally set, and that + // a zero value is not just present because it was never set to anything else (which can happen if the layer was + // created by a version of this library that didn't keep track of digest and size information). + if layer.UncompressedDigest != "" { + return layer.UncompressedSize, nil + } + return -1, nil +} + +func (r *layerStore) ClearFlag(id string, flag string) error { + if !r.IsReadWrite() { + return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to clear flags on layers at %q", r.layerspath()) + } + layer, ok := r.lookup(id) + if !ok { + return ErrLayerUnknown + } + delete(layer.Flags, flag) + return r.Save() +} + +func (r *layerStore) SetFlag(id string, flag string, value interface{}) error { + if !r.IsReadWrite() { + return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to set flags on layers at %q", r.layerspath()) + } + layer, ok := r.lookup(id) + if !ok { + return ErrLayerUnknown + } + if layer.Flags == nil { + layer.Flags = make(map[string]interface{}) + } + layer.Flags[flag] = value + return r.Save() +} + +func (r *layerStore) Status() ([][2]string, error) { + return r.driver.Status(), nil +} + +func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}, diff io.Reader) (layer *Layer, size int64, err error) { + if !r.IsReadWrite() { + return nil, -1, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to create new layers at %q", r.layerspath()) + } + size = -1 + if err := os.MkdirAll(r.rundir, 0700); err != nil { + return nil, -1, err + } + if err := os.MkdirAll(r.layerdir, 0700); err != nil { + return nil, -1, err + } + if id == "" { + id = stringid.GenerateRandomID() + _, idInUse := r.byid[id] + for idInUse { + id = stringid.GenerateRandomID() + _, idInUse = r.byid[id] + } + } + if duplicateLayer, idInUse := r.byid[id]; idInUse { + return duplicateLayer, -1, ErrDuplicateID + } + names = dedupeNames(names) + for _, name := range names { + if _, nameInUse := r.byname[name]; 
nameInUse { + return nil, -1, ErrDuplicateName + } + } + parent := "" + if parentLayer != nil { + parent = parentLayer.ID + } + var parentMappings, templateIDMappings, oldMappings *idtools.IDMappings + if moreOptions.TemplateLayer != "" { + templateLayer, ok := r.lookup(moreOptions.TemplateLayer) + if !ok { + return nil, -1, ErrLayerUnknown + } + templateIDMappings = idtools.NewIDMappingsFromMaps(templateLayer.UIDMap, templateLayer.GIDMap) + } else { + templateIDMappings = &idtools.IDMappings{} + } + if parentLayer != nil { + parentMappings = idtools.NewIDMappingsFromMaps(parentLayer.UIDMap, parentLayer.GIDMap) + } else { + parentMappings = &idtools.IDMappings{} + } + if mountLabel != "" { + label.ReserveLabel(mountLabel) + } + idMappings := idtools.NewIDMappingsFromMaps(moreOptions.UIDMap, moreOptions.GIDMap) + opts := drivers.CreateOpts{ + MountLabel: mountLabel, + StorageOpt: options, + IDMappings: idMappings, + } + if moreOptions.TemplateLayer != "" { + if err = r.driver.CreateFromTemplate(id, moreOptions.TemplateLayer, templateIDMappings, parent, parentMappings, &opts, writeable); err != nil { + if id != "" { + return nil, -1, errors.Wrapf(err, "error creating copy of template layer %q with ID %q", moreOptions.TemplateLayer, id) + } + return nil, -1, errors.Wrapf(err, "error creating copy of template layer %q", moreOptions.TemplateLayer) + } + oldMappings = templateIDMappings + } else { + if writeable { + if err = r.driver.CreateReadWrite(id, parent, &opts); err != nil { + if id != "" { + return nil, -1, errors.Wrapf(err, "error creating read-write layer with ID %q", id) + } + return nil, -1, errors.Wrapf(err, "error creating read-write layer") + } + } else { + if err = r.driver.Create(id, parent, &opts); err != nil { + if id != "" { + return nil, -1, errors.Wrapf(err, "error creating layer with ID %q", id) + } + return nil, -1, errors.Wrapf(err, "error creating layer") + } + } + oldMappings = parentMappings + } + if !reflect.DeepEqual(oldMappings.UIDs(), idMappings.UIDs()) || !reflect.DeepEqual(oldMappings.GIDs(), idMappings.GIDs()) { + if err = r.driver.UpdateLayerIDMap(id, oldMappings, idMappings, mountLabel); err != nil { + // We don't have a record of this layer, but at least + // try to clean it up underneath us. + r.driver.Remove(id) + return nil, -1, err + } + } + if err == nil { + layer = &Layer{ + ID: id, + Parent: parent, + Names: names, + MountLabel: mountLabel, + Created: time.Now().UTC(), + Flags: make(map[string]interface{}), + UIDMap: copyIDMap(moreOptions.UIDMap), + GIDMap: copyIDMap(moreOptions.GIDMap), + } + r.layers = append(r.layers, layer) + r.idindex.Add(id) + r.byid[id] = layer + for _, name := range names { + r.byname[name] = layer + } + for flag, value := range flags { + layer.Flags[flag] = value + } + if diff != nil { + layer.Flags[incompleteFlag] = true + err = r.Save() + if err != nil { + // We don't have a record of this layer, but at least + // try to clean it up underneath us. + r.driver.Remove(id) + return nil, -1, err + } + size, err = r.ApplyDiff(layer.ID, diff) + if err != nil { + if r.Delete(layer.ID) != nil { + // Either a driver error or an error saving. + // We now have a layer that's been marked for + // deletion but which we failed to remove. + } + return nil, -1, err + } + delete(layer.Flags, incompleteFlag) + } + err = r.Save() + if err != nil { + // We don't have a record of this layer, but at least + // try to clean it up underneath us. 
+ r.driver.Remove(id) + return nil, -1, err + } + layer = copyLayer(layer) + } + return layer, size, err +} + +func (r *layerStore) CreateWithFlags(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}) (layer *Layer, err error) { + layer, _, err = r.Put(id, parent, names, mountLabel, options, moreOptions, writeable, flags, nil) + return layer, err +} + +func (r *layerStore) Create(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool) (layer *Layer, err error) { + return r.CreateWithFlags(id, parent, names, mountLabel, options, moreOptions, writeable, nil) +} + +func (r *layerStore) Mounted(id string) (int, error) { + if !r.IsReadWrite() { + return 0, errors.Wrapf(ErrStoreIsReadOnly, "no mount information for layers at %q", r.mountspath()) + } + r.mountsLockfile.RLock() + defer r.mountsLockfile.Unlock() + if modified, err := r.mountsLockfile.Modified(); modified || err != nil { + if err = r.loadMounts(); err != nil { + return 0, err + } + } + layer, ok := r.lookup(id) + if !ok { + return 0, ErrLayerUnknown + } + return layer.MountCount, nil +} + +func (r *layerStore) Mount(id string, options drivers.MountOpts) (string, error) { + if !r.IsReadWrite() { + return "", errors.Wrapf(ErrStoreIsReadOnly, "not allowed to update mount locations for layers at %q", r.mountspath()) + } + r.mountsLockfile.Lock() + defer r.mountsLockfile.Unlock() + if modified, err := r.mountsLockfile.Modified(); modified || err != nil { + if err = r.loadMounts(); err != nil { + return "", err + } + } + defer r.mountsLockfile.Touch() + layer, ok := r.lookup(id) + if !ok { + return "", ErrLayerUnknown + } + if layer.MountCount > 0 { + layer.MountCount++ + return layer.MountPoint, r.saveMounts() + } + if options.MountLabel == "" { + options.MountLabel = layer.MountLabel + } + + if (options.UidMaps != nil || options.GidMaps != nil) && !r.driver.SupportsShifting() { + if !reflect.DeepEqual(options.UidMaps, layer.UIDMap) || !reflect.DeepEqual(options.GidMaps, layer.GIDMap) { + return "", fmt.Errorf("cannot mount layer %v: shifting not enabled", layer.ID) + } + } + mountpoint, err := r.driver.Get(id, options) + if mountpoint != "" && err == nil { + if layer.MountPoint != "" { + delete(r.bymount, layer.MountPoint) + } + layer.MountPoint = filepath.Clean(mountpoint) + layer.MountCount++ + r.bymount[layer.MountPoint] = layer + err = r.saveMounts() + } + return mountpoint, err +} + +func (r *layerStore) Unmount(id string, force bool) (bool, error) { + if !r.IsReadWrite() { + return false, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to update mount locations for layers at %q", r.mountspath()) + } + r.mountsLockfile.Lock() + defer r.mountsLockfile.Unlock() + if modified, err := r.mountsLockfile.Modified(); modified || err != nil { + if err = r.loadMounts(); err != nil { + return false, err + } + } + defer r.mountsLockfile.Touch() + layer, ok := r.lookup(id) + if !ok { + layerByMount, ok := r.bymount[filepath.Clean(id)] + if !ok { + return false, ErrLayerUnknown + } + layer = layerByMount + } + if force { + layer.MountCount = 1 + } + if layer.MountCount > 1 { + layer.MountCount-- + return true, r.saveMounts() + } + err := r.driver.Put(id) + if err == nil || os.IsNotExist(err) { + if layer.MountPoint != "" { + delete(r.bymount, layer.MountPoint) + } + layer.MountCount-- + layer.MountPoint = "" + return false, r.saveMounts() + } + return true, err +} + 
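Mount and Unmount above are reference-counted: only the first Mount of a layer reaches the graph driver, a repeated Mount just increments MountCount, and the driver's Put is only invoked by the Unmount call that brings the count back to zero (force=true collapses the count to one so that call becomes the final one). A minimal caller-side sketch of that pairing, assuming this vendored package; the mountBriefly helper, store value, and layer ID are hypothetical, not part of the vendored file:

package storage

import (
	drivers "github.com/containers/storage/drivers"
)

// mountBriefly is a hypothetical illustration of the Mount/Unmount pairing.
func mountBriefly(store LayerStore, layerID string) error {
	// First Mount calls into the graph driver and sets MountCount to 1.
	mountPoint, err := store.Mount(layerID, drivers.MountOpts{})
	if err != nil {
		return err
	}
	_ = mountPoint // callers would read the layer's contents under this path

	// A nested Mount of the same layer only increments the reference count.
	if _, err := store.Mount(layerID, drivers.MountOpts{}); err != nil {
		return err
	}

	// Unmount with force=false decrements the count and reports whether the
	// layer is still mounted; the call that reaches zero invokes driver.Put.
	for mounted := true; mounted; {
		if mounted, err = store.Unmount(layerID, false); err != nil {
			return err
		}
	}
	return nil
}
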
+func (r *layerStore) ParentOwners(id string) (uids, gids []int, err error) { + if !r.IsReadWrite() { + return nil, nil, errors.Wrapf(ErrStoreIsReadOnly, "no mount information for layers at %q", r.mountspath()) + } + r.mountsLockfile.RLock() + defer r.mountsLockfile.Unlock() + if modified, err := r.mountsLockfile.Modified(); modified || err != nil { + if err = r.loadMounts(); err != nil { + return nil, nil, err + } + } + layer, ok := r.lookup(id) + if !ok { + return nil, nil, ErrLayerUnknown + } + if len(layer.UIDMap) == 0 && len(layer.GIDMap) == 0 { + // We're not using any mappings, so there aren't any unmapped IDs on parent directories. + return nil, nil, nil + } + if layer.MountPoint == "" { + // We don't know which directories to examine. + return nil, nil, ErrLayerNotMounted + } + rootuid, rootgid, err := idtools.GetRootUIDGID(layer.UIDMap, layer.GIDMap) + if err != nil { + return nil, nil, errors.Wrapf(err, "error reading root ID values for layer %q", layer.ID) + } + m := idtools.NewIDMappingsFromMaps(layer.UIDMap, layer.GIDMap) + fsuids := make(map[int]struct{}) + fsgids := make(map[int]struct{}) + for dir := filepath.Dir(layer.MountPoint); dir != "" && dir != string(os.PathSeparator); dir = filepath.Dir(dir) { + st, err := system.Stat(dir) + if err != nil { + return nil, nil, errors.Wrapf(err, "error reading ownership of directory %q", dir) + } + lst, err := system.Lstat(dir) + if err != nil { + return nil, nil, errors.Wrapf(err, "error reading ownership of directory-in-case-it's-a-symlink %q", dir) + } + fsuid := int(st.UID()) + fsgid := int(st.GID()) + if _, _, err := m.ToContainer(idtools.IDPair{UID: fsuid, GID: rootgid}); err != nil { + fsuids[fsuid] = struct{}{} + } + if _, _, err := m.ToContainer(idtools.IDPair{UID: rootuid, GID: fsgid}); err != nil { + fsgids[fsgid] = struct{}{} + } + fsuid = int(lst.UID()) + fsgid = int(lst.GID()) + if _, _, err := m.ToContainer(idtools.IDPair{UID: fsuid, GID: rootgid}); err != nil { + fsuids[fsuid] = struct{}{} + } + if _, _, err := m.ToContainer(idtools.IDPair{UID: rootuid, GID: fsgid}); err != nil { + fsgids[fsgid] = struct{}{} + } + } + for uid := range fsuids { + uids = append(uids, uid) + } + for gid := range fsgids { + gids = append(gids, gid) + } + if len(uids) > 1 { + sort.Ints(uids) + } + if len(gids) > 1 { + sort.Ints(gids) + } + return uids, gids, nil +} + +func (r *layerStore) removeName(layer *Layer, name string) { + layer.Names = stringSliceWithoutValue(layer.Names, name) +} + +func (r *layerStore) SetNames(id string, names []string) error { + if !r.IsReadWrite() { + return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to change layer name assignments at %q", r.layerspath()) + } + names = dedupeNames(names) + if layer, ok := r.lookup(id); ok { + for _, name := range layer.Names { + delete(r.byname, name) + } + for _, name := range names { + if otherLayer, ok := r.byname[name]; ok { + r.removeName(otherLayer, name) + } + r.byname[name] = layer + } + layer.Names = names + return r.Save() + } + return ErrLayerUnknown +} + +func (r *layerStore) Metadata(id string) (string, error) { + if layer, ok := r.lookup(id); ok { + return layer.Metadata, nil + } + return "", ErrLayerUnknown +} + +func (r *layerStore) SetMetadata(id, metadata string) error { + if !r.IsReadWrite() { + return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify layer metadata at %q", r.layerspath()) + } + if layer, ok := r.lookup(id); ok { + layer.Metadata = metadata + return r.Save() + } + return ErrLayerUnknown +} + +func (r *layerStore) tspath(id string) 
string { + return filepath.Join(r.layerdir, id+tarSplitSuffix) +} + +func (r *layerStore) Delete(id string) error { + if !r.IsReadWrite() { + return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete layers at %q", r.layerspath()) + } + layer, ok := r.lookup(id) + if !ok { + return ErrLayerUnknown + } + id = layer.ID + // The layer may already have been explicitly unmounted, but if not, we + // should try to clean that up before we start deleting anything at the + // driver level. + mountCount, err := r.Mounted(id) + if err != nil { + return errors.Wrapf(err, "error checking if layer %q is still mounted", id) + } + for mountCount > 0 { + if _, err := r.Unmount(id, false); err != nil { + return err + } + mountCount, err = r.Mounted(id) + if err != nil { + return errors.Wrapf(err, "error checking if layer %q is still mounted", id) + } + } + err = r.driver.Remove(id) + if err == nil { + os.Remove(r.tspath(id)) + delete(r.byid, id) + r.idindex.Delete(id) + mountLabel := layer.MountLabel + if layer.MountPoint != "" { + delete(r.bymount, layer.MountPoint) + } + toDeleteIndex := -1 + for i, candidate := range r.layers { + if candidate.ID == id { + toDeleteIndex = i + break + } + } + if toDeleteIndex != -1 { + // delete the layer at toDeleteIndex + if toDeleteIndex == len(r.layers)-1 { + r.layers = r.layers[:len(r.layers)-1] + } else { + r.layers = append(r.layers[:toDeleteIndex], r.layers[toDeleteIndex+1:]...) + } + } + if mountLabel != "" { + var found bool + for _, candidate := range r.layers { + if candidate.MountLabel == mountLabel { + found = true + break + } + } + if !found { + label.ReleaseLabel(mountLabel) + } + } + if err = r.Save(); err != nil { + return err + } + } + return err +} + +func (r *layerStore) Lookup(name string) (id string, err error) { + if layer, ok := r.lookup(name); ok { + return layer.ID, nil + } + return "", ErrLayerUnknown +} + +func (r *layerStore) Exists(id string) bool { + _, ok := r.lookup(id) + return ok +} + +func (r *layerStore) Get(id string) (*Layer, error) { + if layer, ok := r.lookup(id); ok { + return copyLayer(layer), nil + } + return nil, ErrLayerUnknown +} + +func (r *layerStore) Wipe() error { + if !r.IsReadWrite() { + return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete layers at %q", r.layerspath()) + } + ids := make([]string, 0, len(r.byid)) + for id := range r.byid { + ids = append(ids, id) + } + for _, id := range ids { + if err := r.Delete(id); err != nil { + return err + } + } + return nil +} + +func (r *layerStore) findParentAndLayer(from, to string) (fromID string, toID string, fromLayer, toLayer *Layer, err error) { + var ok bool + toLayer, ok = r.lookup(to) + if !ok { + return "", "", nil, nil, ErrLayerUnknown + } + to = toLayer.ID + if from == "" { + from = toLayer.Parent + } + if from != "" { + fromLayer, ok = r.lookup(from) + if ok { + from = fromLayer.ID + } else { + fromLayer, ok = r.lookup(toLayer.Parent) + if ok { + from = fromLayer.ID + } + } + } + return from, to, fromLayer, toLayer, nil +} + +func (r *layerStore) layerMappings(layer *Layer) *idtools.IDMappings { + if layer == nil { + return &idtools.IDMappings{} + } + return idtools.NewIDMappingsFromMaps(layer.UIDMap, layer.GIDMap) +} + +func (r *layerStore) Changes(from, to string) ([]archive.Change, error) { + from, to, fromLayer, toLayer, err := r.findParentAndLayer(from, to) + if err != nil { + return nil, ErrLayerUnknown + } + return r.driver.Changes(to, r.layerMappings(toLayer), from, r.layerMappings(fromLayer), toLayer.MountLabel) +} + +type simpleGetCloser 
struct { + r *layerStore + path string + id string +} + +func (s *simpleGetCloser) Get(path string) (io.ReadCloser, error) { + return os.Open(filepath.Join(s.path, path)) +} + +func (s *simpleGetCloser) Close() error { + _, err := s.r.Unmount(s.id, false) + return err +} + +func (r *layerStore) newFileGetter(id string) (drivers.FileGetCloser, error) { + if getter, ok := r.driver.(drivers.DiffGetterDriver); ok { + return getter.DiffGetter(id) + } + path, err := r.Mount(id, drivers.MountOpts{}) + if err != nil { + return nil, err + } + return &simpleGetCloser{ + r: r, + path: path, + id: id, + }, nil +} + +func (r *layerStore) Diff(from, to string, options *DiffOptions) (io.ReadCloser, error) { + var metadata storage.Unpacker + + from, to, fromLayer, toLayer, err := r.findParentAndLayer(from, to) + if err != nil { + return nil, ErrLayerUnknown + } + // Default to applying the type of compression that we noted was used + // for the layerdiff when it was applied. + compression := toLayer.CompressionType + // If a particular compression type (or no compression) was selected, + // use that instead. + if options != nil && options.Compression != nil { + compression = *options.Compression + } + maybeCompressReadCloser := func(rc io.ReadCloser) (io.ReadCloser, error) { + // Depending on whether or not compression is desired, return either the + // passed-in ReadCloser, or a new one that provides its readers with a + // compressed version of the data that the original would have provided + // to its readers. + if compression == archive.Uncompressed { + return rc, nil + } + preader, pwriter := io.Pipe() + compressor, err := archive.CompressStream(pwriter, compression) + if err != nil { + rc.Close() + pwriter.Close() + preader.Close() + return nil, err + } + go func() { + defer pwriter.Close() + defer compressor.Close() + defer rc.Close() + io.Copy(compressor, rc) + }() + return preader, nil + } + + if from != toLayer.Parent { + diff, err := r.driver.Diff(to, r.layerMappings(toLayer), from, r.layerMappings(fromLayer), toLayer.MountLabel) + if err != nil { + return nil, err + } + return maybeCompressReadCloser(diff) + } + + tsfile, err := os.Open(r.tspath(to)) + if err != nil { + if !os.IsNotExist(err) { + return nil, err + } + diff, err := r.driver.Diff(to, r.layerMappings(toLayer), from, r.layerMappings(fromLayer), toLayer.MountLabel) + if err != nil { + return nil, err + } + return maybeCompressReadCloser(diff) + } + defer tsfile.Close() + + decompressor, err := pgzip.NewReader(tsfile) + if err != nil { + return nil, err + } + defer decompressor.Close() + + tsbytes, err := ioutil.ReadAll(decompressor) + if err != nil { + return nil, err + } + + metadata = storage.NewJSONUnpacker(bytes.NewBuffer(tsbytes)) + + fgetter, err := r.newFileGetter(to) + if err != nil { + return nil, err + } + + tarstream := asm.NewOutputTarStream(fgetter, metadata) + rc := ioutils.NewReadCloserWrapper(tarstream, func() error { + err1 := tarstream.Close() + err2 := fgetter.Close() + if err2 == nil { + return err1 + } + return err2 + }) + return maybeCompressReadCloser(rc) +} + +func (r *layerStore) DiffSize(from, to string) (size int64, err error) { + var fromLayer, toLayer *Layer + from, to, fromLayer, toLayer, err = r.findParentAndLayer(from, to) + if err != nil { + return -1, ErrLayerUnknown + } + return r.driver.DiffSize(to, r.layerMappings(toLayer), from, r.layerMappings(fromLayer), toLayer.MountLabel) +} + +func (r *layerStore) ApplyDiff(to string, diff io.Reader) (size int64, err error) { + if !r.IsReadWrite() { + 
return -1, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify layer contents at %q", r.layerspath()) + } + + layer, ok := r.lookup(to) + if !ok { + return -1, ErrLayerUnknown + } + + header := make([]byte, 10240) + n, err := diff.Read(header) + if err != nil && err != io.EOF { + return -1, err + } + + compression := archive.DetectCompression(header[:n]) + compressedDigest := digest.Canonical.Digester() + compressedCounter := ioutils.NewWriteCounter(compressedDigest.Hash()) + defragmented := io.TeeReader(io.MultiReader(bytes.NewBuffer(header[:n]), diff), compressedCounter) + + tsdata := bytes.Buffer{} + compressor, err := pgzip.NewWriterLevel(&tsdata, pgzip.BestSpeed) + if err != nil { + compressor = pgzip.NewWriter(&tsdata) + } + metadata := storage.NewJSONPacker(compressor) + uncompressed, err := archive.DecompressStream(defragmented) + if err != nil { + return -1, err + } + uncompressedDigest := digest.Canonical.Digester() + uncompressedCounter := ioutils.NewWriteCounter(uncompressedDigest.Hash()) + payload, err := asm.NewInputTarStream(io.TeeReader(uncompressed, uncompressedCounter), metadata, storage.NewDiscardFilePutter()) + if err != nil { + return -1, err + } + size, err = r.driver.ApplyDiff(layer.ID, r.layerMappings(layer), layer.Parent, layer.MountLabel, payload) + if err != nil { + return -1, err + } + compressor.Close() + if err == nil { + if err := os.MkdirAll(filepath.Dir(r.tspath(layer.ID)), 0700); err != nil { + return -1, err + } + if err := ioutils.AtomicWriteFile(r.tspath(layer.ID), tsdata.Bytes(), 0600); err != nil { + return -1, err + } + } + + updateDigestMap := func(m *map[digest.Digest][]string, oldvalue, newvalue digest.Digest, id string) { + var newList []string + if oldvalue != "" { + for _, value := range (*m)[oldvalue] { + if value != id { + newList = append(newList, value) + } + } + if len(newList) > 0 { + (*m)[oldvalue] = newList + } else { + delete(*m, oldvalue) + } + } + if newvalue != "" { + (*m)[newvalue] = append((*m)[newvalue], id) + } + } + updateDigestMap(&r.bycompressedsum, layer.CompressedDigest, compressedDigest.Digest(), layer.ID) + layer.CompressedDigest = compressedDigest.Digest() + layer.CompressedSize = compressedCounter.Count + updateDigestMap(&r.byuncompressedsum, layer.UncompressedDigest, uncompressedDigest.Digest(), layer.ID) + layer.UncompressedDigest = uncompressedDigest.Digest() + layer.UncompressedSize = uncompressedCounter.Count + layer.CompressionType = compression + + err = r.Save() + + return size, err +} + +func (r *layerStore) layersByDigestMap(m map[digest.Digest][]string, d digest.Digest) ([]Layer, error) { + var layers []Layer + for _, layerID := range m[d] { + layer, ok := r.lookup(layerID) + if !ok { + return nil, ErrLayerUnknown + } + layers = append(layers, *copyLayer(layer)) + } + return layers, nil +} + +func (r *layerStore) LayersByCompressedDigest(d digest.Digest) ([]Layer, error) { + return r.layersByDigestMap(r.bycompressedsum, d) +} + +func (r *layerStore) LayersByUncompressedDigest(d digest.Digest) ([]Layer, error) { + return r.layersByDigestMap(r.byuncompressedsum, d) +} + +func (r *layerStore) Lock() { + r.lockfile.Lock() +} + +func (r *layerStore) RecursiveLock() { + r.lockfile.RecursiveLock() +} + +func (r *layerStore) RLock() { + r.lockfile.RLock() +} + +func (r *layerStore) Unlock() { + r.lockfile.Unlock() +} + +func (r *layerStore) Touch() error { + return r.lockfile.Touch() +} + +func (r *layerStore) Modified() (bool, error) { + var mmodified bool + lmodified, err := r.lockfile.Modified() + if err != 
nil { + return lmodified, err + } + if r.IsReadWrite() { + r.mountsLockfile.RLock() + defer r.mountsLockfile.Unlock() + mmodified, err = r.mountsLockfile.Modified() + if err != nil { + return lmodified, err + } + } + return lmodified || mmodified, nil +} + +func (r *layerStore) IsReadWrite() bool { + return r.lockfile.IsReadWrite() +} + +func (r *layerStore) TouchedSince(when time.Time) bool { + return r.lockfile.TouchedSince(when) +} + +func (r *layerStore) Locked() bool { + return r.lockfile.Locked() +} diff --git a/vendor/github.com/containers/storage/layers_ffjson.go b/vendor/github.com/containers/storage/layers_ffjson.go new file mode 100644 index 0000000..125b5d8 --- /dev/null +++ b/vendor/github.com/containers/storage/layers_ffjson.go @@ -0,0 +1,1932 @@ +// Code generated by ffjson . DO NOT EDIT. +// source: layers.go + +package storage + +import ( + "bytes" + "encoding/json" + "fmt" + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/idtools" + "github.com/opencontainers/go-digest" + fflib "github.com/pquerna/ffjson/fflib/v1" +) + +// MarshalJSON marshal bytes to json - template +func (j *DiffOptions) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if j == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := j.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// MarshalJSONBuf marshal buff to json - template +func (j *DiffOptions) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if j == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + if j.Compression != nil { + buf.WriteString(`{"Compression":`) + fflib.FormatBits2(buf, uint64(*j.Compression), 10, *j.Compression < 0) + } else { + buf.WriteString(`{"Compression":null`) + } + buf.WriteByte('}') + return nil +} + +const ( + ffjtDiffOptionsbase = iota + ffjtDiffOptionsnosuchkey + + ffjtDiffOptionsCompression +) + +var ffjKeyDiffOptionsCompression = []byte("Compression") + +// UnmarshalJSON umarshall json - template of ffjson +func (j *DiffOptions) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +// UnmarshalJSONFFLexer fast json unmarshall - template ffjson +func (j *DiffOptions) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error + currentKey := ffjtDiffOptionsbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
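+			// The generated parser is a token-driven state machine
+			// (map_start -> want_key -> want_colon -> want_value ->
+			// after_value) with a goto label per known key.  Behaviorally it
+			// should match the reflection-based equivalent (illustrative):
+			//
+			//	var opts DiffOptions
+			//	_ = json.Unmarshal([]byte(`{"Compression":2}`), &opts)
+			//	// *opts.Compression == archive.Gzip
+			//
+			// ffjson unrolls that into the explicit loop seen here to avoid
+			// reflect overhead on the hot decode path.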
+ currentKey = ffjtDiffOptionsnosuchkey + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'C': + + if bytes.Equal(ffjKeyDiffOptionsCompression, kn) { + currentKey = ffjtDiffOptionsCompression + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffjKeyDiffOptionsCompression, kn) { + currentKey = ffjtDiffOptionsCompression + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffjtDiffOptionsnosuchkey + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffjtDiffOptionsCompression: + goto handle_Compression + + case ffjtDiffOptionsnosuchkey: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Compression: + + /* handler: j.Compression type=archive.Compression kind=int quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for Compression", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + j.Compression = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := archive.Compression(tval) + j.Compression = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +// MarshalJSON marshal bytes to json - template +func (j *Layer) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if j == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := j.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// MarshalJSONBuf marshal buff to json - template +func (j *Layer) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if j == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ "id":`) + fflib.WriteJsonString(buf, string(j.ID)) + buf.WriteByte(',') + if len(j.Names) != 0 { + buf.WriteString(`"names":`) + if j.Names != nil { + buf.WriteString(`[`) + for i, v := range j.Names { + if i != 0 { + buf.WriteString(`,`) + } + fflib.WriteJsonString(buf, string(v)) + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte(',') + } + if len(j.Parent) != 0 { + buf.WriteString(`"parent":`) + fflib.WriteJsonString(buf, string(j.Parent)) + buf.WriteByte(',') + } + if len(j.Metadata) != 0 { + buf.WriteString(`"metadata":`) + fflib.WriteJsonString(buf, string(j.Metadata)) + buf.WriteByte(',') + } + if len(j.MountLabel) != 0 { + 
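+		// Every optional field in this marshaller follows the same shape:
+		// emit `"key":value,` only when the field is non-zero, i.e. a
+		// hand-rolled omitempty; the trailing comma left by the last field
+		// written is dropped by the buf.Rewind(1) before the closing brace.
+		// A typical layers.json entry therefore looks like (values are
+		// illustrative):
+		//
+		//	{"id":"d7a8...","parent":"91e5...","created":"2019-01-01T00:00:00Z",
+		//	 "compressed-diff-digest":"sha256:ab12...","compressed-size":1024,
+		//	 "diff-digest":"sha256:cd34...","diff-size":4096,"compression":2}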
buf.WriteString(`"mountlabel":`) + fflib.WriteJsonString(buf, string(j.MountLabel)) + buf.WriteByte(',') + } + if true { + buf.WriteString(`"created":`) + + { + + obj, err = j.Created.MarshalJSON() + if err != nil { + return err + } + buf.Write(obj) + + } + buf.WriteByte(',') + } + if len(j.CompressedDigest) != 0 { + buf.WriteString(`"compressed-diff-digest":`) + fflib.WriteJsonString(buf, string(j.CompressedDigest)) + buf.WriteByte(',') + } + if j.CompressedSize != 0 { + buf.WriteString(`"compressed-size":`) + fflib.FormatBits2(buf, uint64(j.CompressedSize), 10, j.CompressedSize < 0) + buf.WriteByte(',') + } + if len(j.UncompressedDigest) != 0 { + buf.WriteString(`"diff-digest":`) + fflib.WriteJsonString(buf, string(j.UncompressedDigest)) + buf.WriteByte(',') + } + if j.UncompressedSize != 0 { + buf.WriteString(`"diff-size":`) + fflib.FormatBits2(buf, uint64(j.UncompressedSize), 10, j.UncompressedSize < 0) + buf.WriteByte(',') + } + if j.CompressionType != 0 { + buf.WriteString(`"compression":`) + fflib.FormatBits2(buf, uint64(j.CompressionType), 10, j.CompressionType < 0) + buf.WriteByte(',') + } + if len(j.Flags) != 0 { + buf.WriteString(`"flags":`) + /* Falling back. type=map[string]interface {} kind=map */ + err = buf.Encode(j.Flags) + if err != nil { + return err + } + buf.WriteByte(',') + } + if len(j.UIDMap) != 0 { + buf.WriteString(`"uidmap":`) + if j.UIDMap != nil { + buf.WriteString(`[`) + for i, v := range j.UIDMap { + if i != 0 { + buf.WriteString(`,`) + } + /* Struct fall back. type=idtools.IDMap kind=struct */ + err = buf.Encode(&v) + if err != nil { + return err + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte(',') + } + if len(j.GIDMap) != 0 { + buf.WriteString(`"gidmap":`) + if j.GIDMap != nil { + buf.WriteString(`[`) + for i, v := range j.GIDMap { + if i != 0 { + buf.WriteString(`,`) + } + /* Struct fall back. 
type=idtools.IDMap kind=struct */ + err = buf.Encode(&v) + if err != nil { + return err + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte(',') + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffjtLayerbase = iota + ffjtLayernosuchkey + + ffjtLayerID + + ffjtLayerNames + + ffjtLayerParent + + ffjtLayerMetadata + + ffjtLayerMountLabel + + ffjtLayerCreated + + ffjtLayerCompressedDigest + + ffjtLayerCompressedSize + + ffjtLayerUncompressedDigest + + ffjtLayerUncompressedSize + + ffjtLayerCompressionType + + ffjtLayerFlags + + ffjtLayerUIDMap + + ffjtLayerGIDMap +) + +var ffjKeyLayerID = []byte("id") + +var ffjKeyLayerNames = []byte("names") + +var ffjKeyLayerParent = []byte("parent") + +var ffjKeyLayerMetadata = []byte("metadata") + +var ffjKeyLayerMountLabel = []byte("mountlabel") + +var ffjKeyLayerCreated = []byte("created") + +var ffjKeyLayerCompressedDigest = []byte("compressed-diff-digest") + +var ffjKeyLayerCompressedSize = []byte("compressed-size") + +var ffjKeyLayerUncompressedDigest = []byte("diff-digest") + +var ffjKeyLayerUncompressedSize = []byte("diff-size") + +var ffjKeyLayerCompressionType = []byte("compression") + +var ffjKeyLayerFlags = []byte("flags") + +var ffjKeyLayerUIDMap = []byte("uidmap") + +var ffjKeyLayerGIDMap = []byte("gidmap") + +// UnmarshalJSON umarshall json - template of ffjson +func (j *Layer) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +// UnmarshalJSONFFLexer fast json unmarshall - template ffjson +func (j *Layer) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error + currentKey := ffjtLayerbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
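+			// Key lookup is two-tier: the switch below does byte-exact
+			// matching on the first character (fast path for canonical
+			// keys), and the EqualFoldRight/SimpleLetterEqualFold calls
+			// after it retry case-insensitively, so e.g. both of these
+			// decode into the same field (illustrative):
+			//
+			//	json.Unmarshal([]byte(`{"parent":"x"}`), &layer)
+			//	json.Unmarshal([]byte(`{"PARENT":"x"}`), &layer)
+			//
+			// Keys that match neither tier land in ffjtLayernosuchkey and
+			// are skipped rather than treated as an error.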
+ currentKey = ffjtLayernosuchkey + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'c': + + if bytes.Equal(ffjKeyLayerCreated, kn) { + currentKey = ffjtLayerCreated + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffjKeyLayerCompressedDigest, kn) { + currentKey = ffjtLayerCompressedDigest + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffjKeyLayerCompressedSize, kn) { + currentKey = ffjtLayerCompressedSize + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffjKeyLayerCompressionType, kn) { + currentKey = ffjtLayerCompressionType + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'd': + + if bytes.Equal(ffjKeyLayerUncompressedDigest, kn) { + currentKey = ffjtLayerUncompressedDigest + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffjKeyLayerUncompressedSize, kn) { + currentKey = ffjtLayerUncompressedSize + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'f': + + if bytes.Equal(ffjKeyLayerFlags, kn) { + currentKey = ffjtLayerFlags + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'g': + + if bytes.Equal(ffjKeyLayerGIDMap, kn) { + currentKey = ffjtLayerGIDMap + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'i': + + if bytes.Equal(ffjKeyLayerID, kn) { + currentKey = ffjtLayerID + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'm': + + if bytes.Equal(ffjKeyLayerMetadata, kn) { + currentKey = ffjtLayerMetadata + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffjKeyLayerMountLabel, kn) { + currentKey = ffjtLayerMountLabel + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'n': + + if bytes.Equal(ffjKeyLayerNames, kn) { + currentKey = ffjtLayerNames + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'p': + + if bytes.Equal(ffjKeyLayerParent, kn) { + currentKey = ffjtLayerParent + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'u': + + if bytes.Equal(ffjKeyLayerUIDMap, kn) { + currentKey = ffjtLayerUIDMap + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffjKeyLayerGIDMap, kn) { + currentKey = ffjtLayerGIDMap + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffjKeyLayerUIDMap, kn) { + currentKey = ffjtLayerUIDMap + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffjKeyLayerFlags, kn) { + currentKey = ffjtLayerFlags + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffjKeyLayerCompressionType, kn) { + currentKey = ffjtLayerCompressionType + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffjKeyLayerUncompressedSize, kn) { + currentKey = ffjtLayerUncompressedSize + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffjKeyLayerUncompressedDigest, kn) { + currentKey = ffjtLayerUncompressedDigest + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffjKeyLayerCompressedSize, kn) { + currentKey = ffjtLayerCompressedSize + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffjKeyLayerCompressedDigest, kn) { + currentKey = ffjtLayerCompressedDigest + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffjKeyLayerCreated, kn) { + currentKey = ffjtLayerCreated + state = fflib.FFParse_want_colon + 
goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffjKeyLayerMountLabel, kn) { + currentKey = ffjtLayerMountLabel + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffjKeyLayerMetadata, kn) { + currentKey = ffjtLayerMetadata + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffjKeyLayerParent, kn) { + currentKey = ffjtLayerParent + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffjKeyLayerNames, kn) { + currentKey = ffjtLayerNames + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffjKeyLayerID, kn) { + currentKey = ffjtLayerID + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffjtLayernosuchkey + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffjtLayerID: + goto handle_ID + + case ffjtLayerNames: + goto handle_Names + + case ffjtLayerParent: + goto handle_Parent + + case ffjtLayerMetadata: + goto handle_Metadata + + case ffjtLayerMountLabel: + goto handle_MountLabel + + case ffjtLayerCreated: + goto handle_Created + + case ffjtLayerCompressedDigest: + goto handle_CompressedDigest + + case ffjtLayerCompressedSize: + goto handle_CompressedSize + + case ffjtLayerUncompressedDigest: + goto handle_UncompressedDigest + + case ffjtLayerUncompressedSize: + goto handle_UncompressedSize + + case ffjtLayerCompressionType: + goto handle_CompressionType + + case ffjtLayerFlags: + goto handle_Flags + + case ffjtLayerUIDMap: + goto handle_UIDMap + + case ffjtLayerGIDMap: + goto handle_GIDMap + + case ffjtLayernosuchkey: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_ID: + + /* handler: j.ID type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + j.ID = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Names: + + /* handler: j.Names type=[]string kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + j.Names = nil + } else { + + j.Names = []string{} + + wantVal := true + + for { + + var tmpJNames string + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
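+				// wantVal is true until the first element has been consumed,
+				// so a comma that shows up before any value, e.g. the leading
+				// comma in (illustrative)
+				//
+				//	json.Unmarshal([]byte(`{"names":[,"x"]}`), &layer)
+				//
+				// is rejected at this return rather than being silently
+				// skipped.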
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmpJNames type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + tmpJNames = string(string(outBuf)) + + } + } + + j.Names = append(j.Names, tmpJNames) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Parent: + + /* handler: j.Parent type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + j.Parent = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Metadata: + + /* handler: j.Metadata type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + j.Metadata = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_MountLabel: + + /* handler: j.MountLabel type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + j.MountLabel = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Created: + + /* handler: j.Created type=time.Time kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + } else { + + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = j.Created.UnmarshalJSON(tbuf) + if err != nil { + return fs.WrapErr(err) + } + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_CompressedDigest: + + /* handler: j.CompressedDigest type=digest.Digest kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for Digest", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + j.CompressedDigest = digest.Digest(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_CompressedSize: + + /* handler: j.CompressedSize type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + j.CompressedSize = int64(tval) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_UncompressedDigest: + + /* handler: j.UncompressedDigest type=digest.Digest kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go 
value for Digest", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + j.UncompressedDigest = digest.Digest(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_UncompressedSize: + + /* handler: j.UncompressedSize type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + j.UncompressedSize = int64(tval) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_CompressionType: + + /* handler: j.CompressionType type=archive.Compression kind=int quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for Compression", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + j.CompressionType = archive.Compression(tval) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Flags: + + /* handler: j.Flags type=map[string]interface {} kind=map quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_bracket && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + j.Flags = nil + } else { + + j.Flags = make(map[string]interface{}, 0) + + wantVal := true + + for { + + var k string + + var tmpJFlags interface{} + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_bracket { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: k type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + k = string(string(outBuf)) + + } + } + + // Expect ':' after key + tok = fs.Scan() + if tok != fflib.FFTok_colon { + return fs.WrapErr(fmt.Errorf("wanted colon token, but got token: %v", tok)) + } + + tok = fs.Scan() + /* handler: tmpJFlags type=interface {} kind=interface quoted=false*/ + + { + /* Falling back. 
type=interface {} kind=interface */ + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = json.Unmarshal(tbuf, &tmpJFlags) + if err != nil { + return fs.WrapErr(err) + } + } + + j.Flags[k] = tmpJFlags + + wantVal = false + } + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_UIDMap: + + /* handler: j.UIDMap type=[]idtools.IDMap kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + j.UIDMap = nil + } else { + + j.UIDMap = []idtools.IDMap{} + + wantVal := true + + for { + + var tmpJUIDMap idtools.IDMap + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmpJUIDMap type=idtools.IDMap kind=struct quoted=false*/ + + { + /* Falling back. type=idtools.IDMap kind=struct */ + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = json.Unmarshal(tbuf, &tmpJUIDMap) + if err != nil { + return fs.WrapErr(err) + } + } + + j.UIDMap = append(j.UIDMap, tmpJUIDMap) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_GIDMap: + + /* handler: j.GIDMap type=[]idtools.IDMap kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + j.GIDMap = nil + } else { + + j.GIDMap = []idtools.IDMap{} + + wantVal := true + + for { + + var tmpJGIDMap idtools.IDMap + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmpJGIDMap type=idtools.IDMap kind=struct quoted=false*/ + + { + /* Falling back. 
type=idtools.IDMap kind=struct */ + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = json.Unmarshal(tbuf, &tmpJGIDMap) + if err != nil { + return fs.WrapErr(err) + } + } + + j.GIDMap = append(j.GIDMap, tmpJGIDMap) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +// MarshalJSON marshal bytes to json - template +func (j *layerMountPoint) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if j == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := j.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// MarshalJSONBuf marshal buff to json - template +func (j *layerMountPoint) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if j == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"id":`) + fflib.WriteJsonString(buf, string(j.ID)) + buf.WriteString(`,"path":`) + fflib.WriteJsonString(buf, string(j.MountPoint)) + buf.WriteString(`,"count":`) + fflib.FormatBits2(buf, uint64(j.MountCount), 10, j.MountCount < 0) + buf.WriteByte('}') + return nil +} + +const ( + ffjtlayerMountPointbase = iota + ffjtlayerMountPointnosuchkey + + ffjtlayerMountPointID + + ffjtlayerMountPointMountPoint + + ffjtlayerMountPointMountCount +) + +var ffjKeylayerMountPointID = []byte("id") + +var ffjKeylayerMountPointMountPoint = []byte("path") + +var ffjKeylayerMountPointMountCount = []byte("count") + +// UnmarshalJSON umarshall json - template of ffjson +func (j *layerMountPoint) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +// UnmarshalJSONFFLexer fast json unmarshall - template ffjson +func (j *layerMountPoint) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error + currentKey := ffjtlayerMountPointbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
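+			// layerMountPoint is the per-mount record persisted alongside
+			// the layers; a serialized entry looks like (path illustrative):
+			//
+			//	{"id":"d7a8...","path":"/var/lib/storage/overlay/d7a8.../merged","count":1}
+			//
+			// "count" persists the mount reference count that the
+			// Mounted()/Unmount() bookkeeping in layers.go consumes.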
+ currentKey = ffjtlayerMountPointnosuchkey + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'c': + + if bytes.Equal(ffjKeylayerMountPointMountCount, kn) { + currentKey = ffjtlayerMountPointMountCount + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'i': + + if bytes.Equal(ffjKeylayerMountPointID, kn) { + currentKey = ffjtlayerMountPointID + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'p': + + if bytes.Equal(ffjKeylayerMountPointMountPoint, kn) { + currentKey = ffjtlayerMountPointMountPoint + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffjKeylayerMountPointMountCount, kn) { + currentKey = ffjtlayerMountPointMountCount + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffjKeylayerMountPointMountPoint, kn) { + currentKey = ffjtlayerMountPointMountPoint + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffjKeylayerMountPointID, kn) { + currentKey = ffjtlayerMountPointID + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffjtlayerMountPointnosuchkey + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffjtlayerMountPointID: + goto handle_ID + + case ffjtlayerMountPointMountPoint: + goto handle_MountPoint + + case ffjtlayerMountPointMountCount: + goto handle_MountCount + + case ffjtlayerMountPointnosuchkey: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_ID: + + /* handler: j.ID type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + j.ID = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_MountPoint: + + /* handler: j.MountPoint type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + j.MountPoint = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_MountCount: + + /* handler: j.MountCount type=int kind=int quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + j.MountCount = int(tval) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return 
fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +// MarshalJSON marshal bytes to json - template +func (j *layerStore) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if j == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := j.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// MarshalJSONBuf marshal buff to json - template +func (j *layerStore) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if j == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{}`) + return nil +} + +const ( + ffjtlayerStorebase = iota + ffjtlayerStorenosuchkey +) + +// UnmarshalJSON umarshall json - template of ffjson +func (j *layerStore) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +// UnmarshalJSONFFLexer fast json unmarshall - template ffjson +func (j *layerStore) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error + currentKey := ffjtlayerStorebase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
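+			// layerStore itself has no JSON-visible fields, so ffjson emits
+			// stub codecs for it: MarshalJSONBuf above writes a literal {}
+			// and this unmarshaller skips every key it meets.  Illustrative:
+			//
+			//	buf, _ := (&layerStore{}).MarshalJSON() // buf == []byte("{}")
+			//
+			// The generator produces such stubs for every type in layers.go,
+			// persisted or not.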
+ currentKey = ffjtlayerStorenosuchkey + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + } + + currentKey = ffjtlayerStorenosuchkey + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffjtlayerStorenosuchkey: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +// MarshalJSON marshal bytes to json - template +func (j *simpleGetCloser) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if j == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := j.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// MarshalJSONBuf marshal buff to json - template +func (j *simpleGetCloser) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if j == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{}`) + return nil +} + +const ( + ffjtsimpleGetCloserbase = iota + ffjtsimpleGetClosernosuchkey +) + +// UnmarshalJSON umarshall json - template of ffjson +func (j *simpleGetCloser) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +// UnmarshalJSONFFLexer fast json unmarshall - template ffjson +func (j *simpleGetCloser) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error + currentKey := ffjtsimpleGetCloserbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffjtsimpleGetClosernosuchkey + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + } + + currentKey = ffjtsimpleGetClosernosuchkey + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffjtsimpleGetClosernosuchkey: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} diff --git a/vendor/github.com/containers/storage/lockfile.go b/vendor/github.com/containers/storage/lockfile.go new file mode 100644 index 0000000..c4f1b55 --- /dev/null +++ b/vendor/github.com/containers/storage/lockfile.go @@ -0,0 +1,101 @@ +package storage + +import ( + "path/filepath" + "sync" + "time" + + "github.com/pkg/errors" +) + +// A Locker represents a file lock where the file is used to cache an +// identifier of the last party that made changes to whatever's being protected +// by the lock. +type Locker interface { + // Acquire a writer lock. + Lock() + + // Acquire a writer lock recursively, allowing for recursive acquisitions + // within the same process space. + RecursiveLock() + + // Unlock the lock. + Unlock() + + // Acquire a reader lock. + RLock() + + // Touch records, for others sharing the lock, that the caller was the + // last writer. It should only be called with the lock held. + Touch() error + + // Modified() checks if the most recent writer was a party other than the + // last recorded writer. It should only be called with the lock held. + Modified() (bool, error) + + // TouchedSince() checks if the most recent writer modified the file (likely using Touch()) after the specified time. + TouchedSince(when time.Time) bool + + // IsReadWrite() checks if the lock file is read-write + IsReadWrite() bool + + // Locked() checks if lock is locked for writing by a thread in this process + Locked() bool +} + +var ( + lockfiles map[string]Locker + lockfilesLock sync.Mutex +) + +// GetLockfile opens a read-write lock file, creating it if necessary. The +// Locker object may already be locked if the path has already been requested +// by the current process. +func GetLockfile(path string) (Locker, error) { + return getLockfile(path, false) +} + +// GetROLockfile opens a read-only lock file, creating it if necessary. The +// Locker object may already be locked if the path has already been requested +// by the current process. 
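+//
+// A minimal usage sketch for these lockfiles (path illustrative, errors
+// elided):
+//
+//	lock, _ := GetLockfile("/var/lib/containers/storage/storage.lock")
+//	lock.Lock()                        // exclusive, also cross-process on unix
+//	if modified, _ := lock.Modified(); modified {
+//		// another process wrote since we last looked; reload state here
+//	}
+//	// ... mutate whatever the lock protects ...
+//	lock.Touch()                       // record ourselves as the last writer
+//	lock.Unlock()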
+func GetROLockfile(path string) (Locker, error) {
+	return getLockfile(path, true)
+}
+
+// getLockfile returns a Locker object, possibly (depending on the platform)
+// working inter-process, and associated with the specified path.
+//
+// If ro, the lock is a read-write lock and the returned Locker should correspond to the
+// “lock for reading” (shared) operation; otherwise, the lock is either an exclusive lock,
+// or a read-write lock and Locker should correspond to the “lock for writing” (exclusive) operation.
+//
+// WARNING:
+// - The lock may or MAY NOT be inter-process.
+// - There may or MAY NOT be an actual object on the filesystem created for the specified path.
+// - Even if ro, the lock MAY be exclusive.
+func getLockfile(path string, ro bool) (Locker, error) {
+	lockfilesLock.Lock()
+	defer lockfilesLock.Unlock()
+	if lockfiles == nil {
+		lockfiles = make(map[string]Locker)
+	}
+	cleanPath, err := filepath.Abs(path)
+	if err != nil {
+		return nil, errors.Wrapf(err, "error ensuring that path %q is an absolute path", path)
+	}
+	if locker, ok := lockfiles[cleanPath]; ok {
+		if ro && locker.IsReadWrite() {
+			return nil, errors.Errorf("lock %q is not a read-only lock", cleanPath)
+		}
+		if !ro && !locker.IsReadWrite() {
+			return nil, errors.Errorf("lock %q is not a read-write lock", cleanPath)
+		}
+		return locker, nil
+	}
+	locker, err := createLockerForPath(path, ro) // platform-dependent locker
+	if err != nil {
+		return nil, err
+	}
+	// Index the new locker under the same absolute path used for the lookup
+	// above; keying it by filepath.Clean(path) would let a relative path
+	// alias a second Locker for the same file.
+	lockfiles[cleanPath] = locker
+	return locker, nil
+}
diff --git a/vendor/github.com/containers/storage/lockfile_unix.go b/vendor/github.com/containers/storage/lockfile_unix.go
new file mode 100644
index 0000000..00215e9
--- /dev/null
+++ b/vendor/github.com/containers/storage/lockfile_unix.go
@@ -0,0 +1,265 @@
+// +build linux solaris darwin freebsd
+
+package storage
+
+import (
+	"fmt"
+	"os"
+	"sync"
+	"time"
+
+	"github.com/containers/storage/pkg/stringid"
+	"github.com/containers/storage/pkg/system"
+	"github.com/pkg/errors"
+	"golang.org/x/sys/unix"
+)
+
+type lockfile struct {
+	// rwMutex serializes concurrent reader-writer acquisitions in the same process space
+	rwMutex *sync.RWMutex
+	// stateMutex is used to synchronize concurrent accesses to the state below
+	stateMutex *sync.Mutex
+	counter    int64
+	file       string
+	fd         uintptr
+	lw         string
+	locktype   int16
+	locked     bool
+	ro         bool
+	recursive  bool
+}
+
+// openLock opens the file at path and returns the corresponding file
+// descriptor. Note that the path is opened read-only when ro is set. If ro
+// is unset, openLock will open the path read-write and create the file if
+// necessary.
+func openLock(path string, ro bool) (int, error) {
+	if ro {
+		return unix.Open(path, os.O_RDONLY, 0)
+	}
+	return unix.Open(path, os.O_RDWR|os.O_CREATE, unix.S_IRUSR|unix.S_IWUSR)
+}
+
+// createLockerForPath returns a Locker object, possibly (depending on the platform)
+// working inter-process and associated with the specified path.
+//
+// This function will be called at most once for each path value within a single process.
+//
+// If ro, the lock is a read-write lock and the returned Locker should correspond to the
+// “lock for reading” (shared) operation; otherwise, the lock is either an exclusive lock,
+// or a read-write lock and Locker should correspond to the “lock for writing” (exclusive) operation.
+//
+// WARNING:
+// - The lock may or MAY NOT be inter-process.
+// - There may or MAY NOT be an actual object on the filesystem created for the specified path.
+// - Even if ro, the lock MAY be exclusive.
+func createLockerForPath(path string, ro bool) (Locker, error) {
+	// Check if we can open the lock.
+	fd, err := openLock(path, ro)
+	if err != nil {
+		return nil, errors.Wrapf(err, "error opening %q", path)
+	}
+	unix.Close(fd)
+
+	locktype := unix.F_WRLCK
+	if ro {
+		locktype = unix.F_RDLCK
+	}
+	return &lockfile{
+		stateMutex: &sync.Mutex{},
+		rwMutex:    &sync.RWMutex{},
+		file:       path,
+		lw:         stringid.GenerateRandomID(),
+		locktype:   int16(locktype),
+		locked:     false,
+		ro:         ro}, nil
+}
+
+// lock locks the lockfile via fcntl(2) based on the specified type and
+// command.
+func (l *lockfile) lock(l_type int16, recursive bool) {
+	lk := unix.Flock_t{
+		Type:   l_type,
+		Whence: int16(os.SEEK_SET),
+		Start:  0,
+		Len:    0,
+	}
+	switch l_type {
+	case unix.F_RDLCK:
+		l.rwMutex.RLock()
+	case unix.F_WRLCK:
+		if recursive {
+			// NOTE: that's okay as recursive is only set in RecursiveLock(), so
+			// there's no need to protect against hypothetical RDLCK cases.
+			l.rwMutex.RLock()
+		} else {
+			l.rwMutex.Lock()
+		}
+	default:
+		panic(fmt.Sprintf("attempted to acquire a file lock of unrecognized type %d", l_type))
+	}
+	l.stateMutex.Lock()
+	defer l.stateMutex.Unlock()
+	if l.counter == 0 {
+		// If we're the first reference on the lock, we need to open the file again.
+		fd, err := openLock(l.file, l.ro)
+		if err != nil {
+			panic(fmt.Sprintf("error opening %q", l.file))
+		}
+		unix.CloseOnExec(fd)
+		l.fd = uintptr(fd)
+
+		// Optimization: only use the (expensive) fcntl syscall when
+		// the counter is 0.  In this case, we're either the first
+		// reader lock or a writer lock.
+		for unix.FcntlFlock(l.fd, unix.F_SETLKW, &lk) != nil {
+			time.Sleep(10 * time.Millisecond)
+		}
+	}
+	l.locktype = l_type
+	l.locked = true
+	l.recursive = recursive
+	l.counter++
+}
+
+// Lock locks the lockfile as a writer.  Note that RLock() will be called if
+// the lock is a read-only one.
+func (l *lockfile) Lock() {
+	if l.ro {
+		l.RLock()
+	} else {
+		l.lock(unix.F_WRLCK, false)
+	}
+}
+
+// RecursiveLock locks the lockfile as a writer but allows for recursive
+// acquisitions within the same process space.  Note that RLock() will be
+// called if it's a read-only lock.
+func (l *lockfile) RecursiveLock() {
+	if l.ro {
+		l.RLock()
+	} else {
+		l.lock(unix.F_WRLCK, true)
+	}
+}
+
+// RLock locks the lockfile as a reader.
+func (l *lockfile) RLock() {
+	l.lock(unix.F_RDLCK, false)
+}
+
+// Unlock unlocks the lockfile.
+func (l *lockfile) Unlock() {
+	lk := unix.Flock_t{
+		Type:   unix.F_UNLCK,
+		Whence: int16(os.SEEK_SET),
+		Start:  0,
+		Len:    0,
+		Pid:    int32(os.Getpid()),
+	}
+	l.stateMutex.Lock()
+	if !l.locked {
+		// Panic when unlocking an unlocked lock.  That's a violation
+		// of the lock semantics and will reveal such.
+		panic("calling Unlock on unlocked lock")
+	}
+	l.counter--
+	if l.counter < 0 {
+		// Panic when the counter is negative.  There is no way we can
+		// recover from a corrupted lock and we need to protect the
+		// storage from corruption.
+		panic(fmt.Sprintf("lock %q has been unlocked too often", l.file))
+	}
+	if l.counter == 0 {
+		// We should only release the lock when the counter is 0 to
+		// avoid releasing read-locks too early; a given process may
+		// acquire a read lock multiple times.
+		l.locked = false
+		for unix.FcntlFlock(l.fd, unix.F_SETLKW, &lk) != nil {
+			time.Sleep(10 * time.Millisecond)
+		}
+		// Close the file descriptor on the last unlock.
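+		// POSIX fcntl(2) record locks belong to the process, not to a
+		// descriptor: closing *any* fd the process holds on the file drops
+		// all of its locks on that file.  Hence the fd is opened only on
+		// the 0->1 transition in lock() and closed only here on the 1->0
+		// transition, e.g. two readers in one process share a single fd
+		// and a single F_RDLCK:
+		//
+		//	l.RLock(); l.RLock()   // counter 0->1->2, one fcntl call
+		//	l.Unlock(); l.Unlock() // counter 2->1->0, fd closed below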
+ unix.Close(int(l.fd)) + } + if l.locktype == unix.F_RDLCK || l.recursive { + l.rwMutex.RUnlock() + } else { + l.rwMutex.Unlock() + } + l.stateMutex.Unlock() +} + +// Locked checks if lockfile is locked for writing by a thread in this process. +func (l *lockfile) Locked() bool { + l.stateMutex.Lock() + defer l.stateMutex.Unlock() + return l.locked && (l.locktype == unix.F_WRLCK) +} + +// Touch updates the lock file with the UID of the user. +func (l *lockfile) Touch() error { + l.stateMutex.Lock() + if !l.locked || (l.locktype != unix.F_WRLCK) { + panic("attempted to update last-writer in lockfile without the write lock") + } + l.stateMutex.Unlock() + l.lw = stringid.GenerateRandomID() + id := []byte(l.lw) + _, err := unix.Seek(int(l.fd), 0, os.SEEK_SET) + if err != nil { + return err + } + n, err := unix.Write(int(l.fd), id) + if err != nil { + return err + } + if n != len(id) { + return unix.ENOSPC + } + err = unix.Fsync(int(l.fd)) + if err != nil { + return err + } + return nil +} + +// Modified indicates if the lockfile has been updated since the last time it +// was loaded. +func (l *lockfile) Modified() (bool, error) { + id := []byte(l.lw) + l.stateMutex.Lock() + if !l.locked { + panic("attempted to check last-writer in lockfile without locking it first") + } + l.stateMutex.Unlock() + _, err := unix.Seek(int(l.fd), 0, os.SEEK_SET) + if err != nil { + return true, err + } + n, err := unix.Read(int(l.fd), id) + if err != nil { + return true, err + } + if n != len(id) { + return true, nil + } + lw := l.lw + l.lw = string(id) + return l.lw != lw, nil +} + +// IsReadWriteLock indicates if the lock file is a read-write lock. +func (l *lockfile) IsReadWrite() bool { + return !l.ro +} + +// TouchedSince indicates if the lock file has been touched since the specified time +func (l *lockfile) TouchedSince(when time.Time) bool { + st, err := system.Fstat(int(l.fd)) + if err != nil { + return true + } + mtim := st.Mtim() + touched := time.Unix(mtim.Unix()) + return when.Before(touched) +} diff --git a/vendor/github.com/containers/storage/lockfile_windows.go b/vendor/github.com/containers/storage/lockfile_windows.go new file mode 100644 index 0000000..caf7c18 --- /dev/null +++ b/vendor/github.com/containers/storage/lockfile_windows.go @@ -0,0 +1,75 @@ +// +build windows + +package storage + +import ( + "os" + "sync" + "time" +) + +// createLockerForPath returns a Locker object, possibly (depending on the platform) +// working inter-process and associated with the specified path. +// +// This function will be called at most once for each path value within a single process. +// +// If ro, the lock is a read-write lock and the returned Locker should correspond to the +// “lock for reading” (shared) operation; otherwise, the lock is either an exclusive lock, +// or a read-write lock and Locker should correspond to the “lock for writing” (exclusive) operation. +// +// WARNING: +// - The lock may or MAY NOT be inter-process. +// - There may or MAY NOT be an actual object on the filesystem created for the specified path. +// - Even if ro, the lock MAY be exclusive. +func createLockerForPath(path string, ro bool) (Locker, error) { + return &lockfile{locked: false}, nil +} + +type lockfile struct { + mu sync.Mutex + file string + locked bool +} + +func (l *lockfile) Lock() { + l.mu.Lock() + l.locked = true +} + +func (l *lockfile) RecursiveLock() { + // We don't support Windows but a recursive writer-lock in one process-space + // is really a writer lock, so just panic. 
+	panic("not supported")
+}
+
+func (l *lockfile) RLock() {
+	l.mu.Lock()
+	l.locked = true
+}
+
+func (l *lockfile) Unlock() {
+	l.locked = false
+	l.mu.Unlock()
+}
+
+func (l *lockfile) Locked() bool {
+	return l.locked
+}
+
+func (l *lockfile) Modified() (bool, error) {
+	return false, nil
+}
+func (l *lockfile) Touch() error {
+	return nil
+}
+func (l *lockfile) IsReadWrite() bool {
+	return false
+}
+
+func (l *lockfile) TouchedSince(when time.Time) bool {
+	stat, err := os.Stat(l.file)
+	if err != nil {
+		return true
+	}
+	return when.Before(stat.ModTime())
+}
diff --git a/vendor/github.com/containers/storage/pkg/archive/README.md b/vendor/github.com/containers/storage/pkg/archive/README.md
new file mode 100644
index 0000000..7307d96
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/archive/README.md
@@ -0,0 +1 @@
+This code provides helper functions for dealing with archive files.
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive.go b/vendor/github.com/containers/storage/pkg/archive/archive.go
new file mode 100644
index 0000000..9cc717e
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/archive/archive.go
@@ -0,0 +1,1440 @@
+package archive
+
+import (
+	"archive/tar"
+	"bufio"
+	"bytes"
+	"compress/bzip2"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"runtime"
+	"strings"
+	"sync"
+	"syscall"
+
+	"github.com/containers/storage/pkg/fileutils"
+	"github.com/containers/storage/pkg/idtools"
+	"github.com/containers/storage/pkg/ioutils"
+	"github.com/containers/storage/pkg/pools"
+	"github.com/containers/storage/pkg/promise"
+	"github.com/containers/storage/pkg/system"
+	gzip "github.com/klauspost/pgzip"
+	rsystem "github.com/opencontainers/runc/libcontainer/system"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+type (
+	// Compression indicates whether, and how, a stream is compressed.
+	Compression int
+	// WhiteoutFormat is the format of whiteouts to be unpacked.
+	WhiteoutFormat int
+
+	// TarOptions wraps the tar options.
+	TarOptions struct {
+		IncludeFiles     []string
+		ExcludePatterns  []string
+		Compression      Compression
+		NoLchown         bool
+		UIDMaps          []idtools.IDMap
+		GIDMaps          []idtools.IDMap
+		ChownOpts        *idtools.IDPair
+		IncludeSourceDir bool
+		// WhiteoutFormat is the expected on disk format for whiteout files.
+		// This format will be converted to the standard format on pack
+		// and from the standard format on unpack.
+		WhiteoutFormat WhiteoutFormat
+		// This is additional data to be used by the converter. It will
+		// not survive a round trip through JSON, so it's primarily
+		// intended for generating archives (i.e., converting writes).
+		WhiteoutData interface{}
+		// When unpacking, specifies whether overwriting a directory with a
+		// non-directory is allowed and vice versa.
+		NoOverwriteDirNonDir bool
+		// For each include when creating an archive, the included name will be
+		// replaced with the matching name from this map.
+		RebaseNames map[string]string
+		InUserNS    bool
+		// CopyPass indicates that the contents of any archive we're creating
+		// will instantly be extracted and written to disk, so we can deviate
+		// from the traditional behavior/format to get features like subsecond
+		// precision in timestamps.
+		CopyPass bool
+	}
+)
+
+// Archiver allows the reuse of most utility functions of this package with a
+// pluggable Untar function. To facilitate the passing of specific id mappings
+// for untar, an archiver can be created with maps which will then be passed to
+// Untar operations. If ChownOpts is set, its values are mapped using
+// UntarIDMappings before being used to create files and directories on disk.
+type Archiver struct {
+	Untar           func(io.Reader, string, *TarOptions) error
+	TarIDMappings   *idtools.IDMappings
+	ChownOpts       *idtools.IDPair
+	UntarIDMappings *idtools.IDMappings
+}
+
+// NewDefaultArchiver returns a new Archiver without any IDMappings
+func NewDefaultArchiver() *Archiver {
+	return &Archiver{Untar: Untar, TarIDMappings: &idtools.IDMappings{}, UntarIDMappings: &idtools.IDMappings{}}
+}
+
+// breakoutError is used to differentiate errors related to breaking out
+// of the destination directory. When testing archive breakout in the unit
+// tests, this error is expected in order for the test to pass.
+type breakoutError error
+
+const (
+	// Uncompressed represents an uncompressed stream.
+	Uncompressed Compression = iota
+	// Bzip2 is the bzip2 compression algorithm.
+	Bzip2
+	// Gzip is the gzip compression algorithm.
+	Gzip
+	// Xz is the xz compression algorithm.
+	Xz
+)
+
+const (
+	// AUFSWhiteoutFormat is the default format for whiteouts
+	AUFSWhiteoutFormat WhiteoutFormat = iota
+	// OverlayWhiteoutFormat formats whiteout according to the overlay
+	// standard.
+	OverlayWhiteoutFormat
+)
+
+const (
+	modeISDIR  = 040000  // Directory
+	modeISFIFO = 010000  // FIFO
+	modeISREG  = 0100000 // Regular file
+	modeISLNK  = 0120000 // Symbolic link
+	modeISBLK  = 060000  // Block special file
+	modeISCHR  = 020000  // Character special file
+	modeISSOCK = 0140000 // Socket
+)
+
+// IsArchivePath checks if the (possibly compressed) file at the given path
+// starts with a tar file header.
+func IsArchivePath(path string) bool {
+	file, err := os.Open(path)
+	if err != nil {
+		return false
+	}
+	defer file.Close()
+	rdr, err := DecompressStream(file)
+	if err != nil {
+		return false
+	}
+	r := tar.NewReader(rdr)
+	_, err = r.Next()
+	return err == nil
+}
+
+// DetectCompression detects the compression algorithm of the source.
+func DetectCompression(source []byte) Compression {
+	for compression, m := range map[Compression][]byte{
+		Bzip2: {0x42, 0x5A, 0x68},
+		Gzip:  {0x1F, 0x8B, 0x08},
+		Xz:    {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00},
+	} {
+		if len(source) < len(m) {
+			logrus.Debug("Len too short")
+			continue
+		}
+		if bytes.Equal(m, source[:len(m)]) {
+			return compression
+		}
+	}
+	return Uncompressed
+}
+
+func xzDecompress(archive io.Reader) (io.ReadCloser, <-chan struct{}, error) {
+	args := []string{"xz", "-d", "-c", "-q"}
+
+	return cmdStream(exec.Command(args[0], args[1:]...), archive)
+}
+
+// DecompressStream decompresses the archive and returns a ReadCloser with the decompressed archive.
+func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
+	p := pools.BufioReader32KPool
+	buf := p.Get(archive)
+	bs, err := buf.Peek(10)
+	if err != nil && err != io.EOF {
+		// Note: we'll ignore any io.EOF error because there are some odd
+		// cases where the layer.tar file will be empty (zero bytes) and
+		// that results in an io.EOF from the Peek() call. So, in those
+		// cases we'll just treat it as a non-compressed stream and
+		// that means just create an empty layer.
+ // See Issue 18170 + return nil, err + } + + compression := DetectCompression(bs) + switch compression { + case Uncompressed: + readBufWrapper := p.NewReadCloserWrapper(buf, buf) + return readBufWrapper, nil + case Gzip: + gzReader, err := gzip.NewReader(buf) + if err != nil { + return nil, err + } + readBufWrapper := p.NewReadCloserWrapper(buf, gzReader) + return readBufWrapper, nil + case Bzip2: + bz2Reader := bzip2.NewReader(buf) + readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader) + return readBufWrapper, nil + case Xz: + xzReader, chdone, err := xzDecompress(buf) + if err != nil { + return nil, err + } + readBufWrapper := p.NewReadCloserWrapper(buf, xzReader) + return ioutils.NewReadCloserWrapper(readBufWrapper, func() error { + <-chdone + return readBufWrapper.Close() + }), nil + default: + return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) + } +} + +// CompressStream compresses the dest with specified compression algorithm. +func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) { + p := pools.BufioWriter32KPool + buf := p.Get(dest) + switch compression { + case Uncompressed: + writeBufWrapper := p.NewWriteCloserWrapper(buf, buf) + return writeBufWrapper, nil + case Gzip: + gzWriter := gzip.NewWriter(dest) + writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter) + return writeBufWrapper, nil + case Bzip2, Xz: + // archive/bzip2 does not support writing, and there is no xz support at all + // However, this is not a problem as docker only currently generates gzipped tars + return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) + default: + return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) + } +} + +// TarModifierFunc is a function that can be passed to ReplaceFileTarWrapper to +// modify the contents or header of an entry in the archive. If the file already +// exists in the archive the TarModifierFunc will be called with the Header and +// a reader which will return the files content. If the file does not exist both +// header and content will be nil. +type TarModifierFunc func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) + +// ReplaceFileTarWrapper converts inputTarStream to a new tar stream. Files in the +// tar stream are modified if they match any of the keys in mods. 
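+//
+// A usage sketch (the stream and entry names here are illustrative only):
+//
+//	mods := map[string]TarModifierFunc{
+//		"etc/hosts": func(path string, hdr *tar.Header, content io.Reader) (*tar.Header, []byte, error) {
+//			if hdr == nil { // entry absent from the archive: create it
+//				hdr = &tar.Header{Typeflag: tar.TypeReg, Mode: 0644}
+//			}
+//			return hdr, []byte("127.0.0.1 localhost\n"), nil
+//		},
+//	}
+//	modified := ReplaceFileTarWrapper(tarStream, mods) // tarStream is an io.ReadCloser
+//
+// The wrapper fills in the entry's Name and Size from the returned data, so a
+// modifier only needs to supply type, mode, and content.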
+func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModifierFunc) io.ReadCloser { + pipeReader, pipeWriter := io.Pipe() + + go func() { + tarReader := tar.NewReader(inputTarStream) + tarWriter := tar.NewWriter(pipeWriter) + defer inputTarStream.Close() + defer tarWriter.Close() + + modify := func(name string, original *tar.Header, modifier TarModifierFunc, tarReader io.Reader) error { + header, data, err := modifier(name, original, tarReader) + switch { + case err != nil: + return err + case header == nil: + return nil + } + + header.Name = name + header.Size = int64(len(data)) + if err := tarWriter.WriteHeader(header); err != nil { + return err + } + if len(data) != 0 { + if _, err := tarWriter.Write(data); err != nil { + return err + } + } + return nil + } + + var err error + var originalHeader *tar.Header + for { + originalHeader, err = tarReader.Next() + if err == io.EOF { + break + } + if err != nil { + pipeWriter.CloseWithError(err) + return + } + + modifier, ok := mods[originalHeader.Name] + if !ok { + // No modifiers for this file, copy the header and data + if err := tarWriter.WriteHeader(originalHeader); err != nil { + pipeWriter.CloseWithError(err) + return + } + if _, err := pools.Copy(tarWriter, tarReader); err != nil { + pipeWriter.CloseWithError(err) + return + } + continue + } + delete(mods, originalHeader.Name) + + if err := modify(originalHeader.Name, originalHeader, modifier, tarReader); err != nil { + pipeWriter.CloseWithError(err) + return + } + } + + // Apply the modifiers that haven't matched any files in the archive + for name, modifier := range mods { + if err := modify(name, nil, modifier, nil); err != nil { + pipeWriter.CloseWithError(err) + return + } + } + + pipeWriter.Close() + + }() + return pipeReader +} + +// Extension returns the extension of a file that uses the specified compression algorithm. +func (compression *Compression) Extension() string { + switch *compression { + case Uncompressed: + return "tar" + case Bzip2: + return "tar.bz2" + case Gzip: + return "tar.gz" + case Xz: + return "tar.xz" + } + return "" +} + +// FileInfoHeader creates a populated Header from fi. +// Compared to archive pkg this function fills in more information. +// Also, regardless of Go version, this function fills file type bits (e.g. hdr.Mode |= modeISDIR), +// which have been deleted since Go 1.9 archive/tar. 
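+//
+// For example, for a directory tar.FileInfoHeader on Go >= 1.9 leaves the
+// 040000 (modeISDIR) bit out of hdr.Mode; fillGo18FileTypeBits below restores
+// it.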
+func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) { + hdr, err := tar.FileInfoHeader(fi, link) + if err != nil { + return nil, err + } + hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi) + name, err = canonicalTarName(name, fi.IsDir()) + if err != nil { + return nil, fmt.Errorf("tar: cannot canonicalize path: %v", err) + } + hdr.Name = name + if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil { + return nil, err + } + return hdr, nil +} + +// fillGo18FileTypeBits fills type bits which have been removed on Go 1.9 archive/tar +// https://github.com/golang/go/commit/66b5a2f +func fillGo18FileTypeBits(mode int64, fi os.FileInfo) int64 { + fm := fi.Mode() + switch { + case fm.IsRegular(): + mode |= modeISREG + case fi.IsDir(): + mode |= modeISDIR + case fm&os.ModeSymlink != 0: + mode |= modeISLNK + case fm&os.ModeDevice != 0: + if fm&os.ModeCharDevice != 0 { + mode |= modeISCHR + } else { + mode |= modeISBLK + } + case fm&os.ModeNamedPipe != 0: + mode |= modeISFIFO + case fm&os.ModeSocket != 0: + mode |= modeISSOCK + } + return mode +} + +// ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem +// to a tar header +func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error { + capability, _ := system.Lgetxattr(path, "security.capability") + if capability != nil { + hdr.Xattrs = make(map[string]string) + hdr.Xattrs["security.capability"] = string(capability) + } + return nil +} + +type tarWhiteoutConverter interface { + ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error) + ConvertRead(*tar.Header, string) (bool, error) +} + +type tarAppender struct { + TarWriter *tar.Writer + Buffer *bufio.Writer + + // for hardlink mapping + SeenFiles map[uint64]string + IDMappings *idtools.IDMappings + ChownOpts *idtools.IDPair + + // For packing and unpacking whiteout files in the + // non standard format. The whiteout files defined + // by the AUFS standard are used as the tar whiteout + // standard. + WhiteoutConverter tarWhiteoutConverter + // CopyPass indicates that the contents of any archive we're creating + // will instantly be extracted and written to disk, so we can deviate + // from the traditional behavior/format to get features like subsecond + // precision in timestamps. + CopyPass bool +} + +func newTarAppender(idMapping *idtools.IDMappings, writer io.Writer, chownOpts *idtools.IDPair) *tarAppender { + return &tarAppender{ + SeenFiles: make(map[uint64]string), + TarWriter: tar.NewWriter(writer), + Buffer: pools.BufioWriter32KPool.Get(nil), + IDMappings: idMapping, + ChownOpts: chownOpts, + } +} + +// canonicalTarName provides a platform-independent and consistent posix-style +//path for files and directories to be archived regardless of the platform. 
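+//
+// For example, a directory passed in as "foo/bar" is returned as "foo/bar/".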
+func canonicalTarName(name string, isDir bool) (string, error) {
+	name, err := CanonicalTarNameForPath(name)
+	if err != nil {
+		return "", err
+	}
+
+	// suffix with '/' for directories
+	if isDir && !strings.HasSuffix(name, "/") {
+		name += "/"
+	}
+	return name, nil
+}
+
+// addTarFile adds to the tar archive a file from `path` as `name`
+func (ta *tarAppender) addTarFile(path, name string) error {
+	fi, err := os.Lstat(path)
+	if err != nil {
+		return err
+	}
+
+	var link string
+	if fi.Mode()&os.ModeSymlink != 0 {
+		var err error
+		link, err = os.Readlink(path)
+		if err != nil {
+			return err
+		}
+	}
+
+	hdr, err := FileInfoHeader(name, fi, link)
+	if err != nil {
+		return err
+	}
+	if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil {
+		return err
+	}
+	if ta.CopyPass {
+		copyPassHeader(hdr)
+	}
+
+	// if it's not a directory and has more than 1 link,
+	// it's hard linked, so set the type flag accordingly
+	if !fi.IsDir() && hasHardlinks(fi) {
+		inode, err := getInodeFromStat(fi.Sys())
+		if err != nil {
+			return err
+		}
+		// a link should have a name that it links to,
+		// and that linked name should appear first in the tar archive
+		if oldpath, ok := ta.SeenFiles[inode]; ok {
+			hdr.Typeflag = tar.TypeLink
+			hdr.Linkname = oldpath
+			hdr.Size = 0 // This must be here for the writer math to add up!
+		} else {
+			ta.SeenFiles[inode] = name
+		}
+	}
+
+	// handle re-mapping container ID mappings back to host ID mappings before
+	// writing tar headers/files. We skip whiteout files because they were written
+	// by the kernel and already have proper ownership relative to the host.
+	if !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && !ta.IDMappings.Empty() {
+		fileIDPair, err := getFileUIDGID(fi.Sys())
+		if err != nil {
+			return err
+		}
+		hdr.Uid, hdr.Gid, err = ta.IDMappings.ToContainer(fileIDPair)
+		if err != nil {
+			return err
+		}
+	}
+
+	// explicitly override with ChownOpts
+	if ta.ChownOpts != nil {
+		hdr.Uid = ta.ChownOpts.UID
+		hdr.Gid = ta.ChownOpts.GID
+	}
+
+	maybeTruncateHeaderModTime(hdr)
+
+	if ta.WhiteoutConverter != nil {
+		wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi)
+		if err != nil {
+			return err
+		}
+
+		// If a new whiteout file exists, write the original hdr, then
+		// replace hdr with wo so the whiteout is written after it.
+		// Whiteouts should always be written after the original. Note
+		// that ConvertWrite may also have updated the original hdr
+		// itself to be a whiteout by returning a whiteout header.
+		if wo != nil {
+			if err := ta.TarWriter.WriteHeader(hdr); err != nil {
+				return err
+			}
+			if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
+				return fmt.Errorf("tar: cannot use whiteout for non-empty file")
+			}
+			hdr = wo
+		}
+	}
+
+	if err := ta.TarWriter.WriteHeader(hdr); err != nil {
+		return err
+	}
+
+	if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
+		// We use system.OpenSequential to ensure we use sequential file
+		// access on Windows to avoid depleting the standby list.
+		// On Linux, this equates to a regular os.Open.
+ file, err := system.OpenSequential(path) + if err != nil { + return err + } + + ta.Buffer.Reset(ta.TarWriter) + defer ta.Buffer.Reset(nil) + _, err = io.Copy(ta.Buffer, file) + file.Close() + if err != nil { + return err + } + err = ta.Buffer.Flush() + if err != nil { + return err + } + } + + return nil +} + +func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.IDPair, inUserns bool) error { + // hdr.Mode is in linux format, which we can use for sycalls, + // but for os.Foo() calls we need the mode converted to os.FileMode, + // so use hdrInfo.Mode() (they differ for e.g. setuid bits) + hdrInfo := hdr.FileInfo() + + switch hdr.Typeflag { + case tar.TypeDir: + // Create directory unless it exists as a directory already. + // In that case we just want to merge the two + if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) { + if err := os.Mkdir(path, hdrInfo.Mode()); err != nil { + return err + } + } + + case tar.TypeReg, tar.TypeRegA: + // Source is regular file. We use system.OpenFileSequential to use sequential + // file access to avoid depleting the standby list on Windows. + // On Linux, this equates to a regular os.OpenFile + file, err := system.OpenFileSequential(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) + if err != nil { + return err + } + if _, err := io.Copy(file, reader); err != nil { + file.Close() + return err + } + file.Close() + + case tar.TypeBlock, tar.TypeChar: + if inUserns { // cannot create devices in a userns + return nil + } + // Handle this is an OS-specific way + if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { + return err + } + + case tar.TypeFifo: + // Handle this is an OS-specific way + if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { + return err + } + + case tar.TypeLink: + targetPath := filepath.Join(extractDir, hdr.Linkname) + // check for hardlink breakout + if !strings.HasPrefix(targetPath, extractDir) { + return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname)) + } + if err := os.Link(targetPath, path); err != nil { + return err + } + + case tar.TypeSymlink: + // path -> hdr.Linkname = targetPath + // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file + targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) + + // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because + // that symlink would first have to be created, which would be caught earlier, at this very check: + if !strings.HasPrefix(targetPath, extractDir) { + return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname)) + } + if err := os.Symlink(hdr.Linkname, path); err != nil { + return err + } + + case tar.TypeXGlobalHeader: + logrus.Debug("PAX Global Extended Headers found and ignored") + return nil + + default: + return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag) + } + + // Lchown is not supported on Windows. + if Lchown && runtime.GOOS != "windows" { + if chownOpts == nil { + chownOpts = &idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid} + } + if err := idtools.SafeLchown(path, chownOpts.UID, chownOpts.GID); err != nil { + return err + } + } + + var errors []string + for key, value := range hdr.Xattrs { + if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { + if err == syscall.ENOTSUP || (err == syscall.EPERM && inUserns) { + // We ignore errors here because not all graphdrivers support + // xattrs *cough* old versions of AUFS *cough*. 
However only + // ENOTSUP should be emitted in that case, otherwise we still + // bail. We also ignore EPERM errors if we are running in a + // user namespace. + errors = append(errors, err.Error()) + continue + } + return err + } + + } + + if len(errors) > 0 { + logrus.WithFields(logrus.Fields{ + "errors": errors, + }).Warn("ignored xattrs in archive: underlying filesystem doesn't support them") + } + + // There is no LChmod, so ignore mode for symlink. Also, this + // must happen after chown, as that can modify the file mode + if err := handleLChmod(hdr, path, hdrInfo); err != nil { + return err + } + + aTime := hdr.AccessTime + if aTime.Before(hdr.ModTime) { + // Last access time should never be before last modified time. + aTime = hdr.ModTime + } + + // system.Chtimes doesn't support a NOFOLLOW flag atm + if hdr.Typeflag == tar.TypeLink { + if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { + if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { + return err + } + } + } else if hdr.Typeflag != tar.TypeSymlink { + if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { + return err + } + } else { + ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)} + if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { + return err + } + } + return nil +} + +// Tar creates an archive from the directory at `path`, and returns it as a +// stream of bytes. +func Tar(path string, compression Compression) (io.ReadCloser, error) { + return TarWithOptions(path, &TarOptions{Compression: compression}) +} + +// TarWithOptions creates an archive from the directory at `path`, only including files whose relative +// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. +func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { + + // Fix the source path to work with long path names. This is a no-op + // on platforms other than Windows. + srcPath = fixVolumePathPrefix(srcPath) + + pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns) + if err != nil { + return nil, err + } + + pipeReader, pipeWriter := io.Pipe() + + compressWriter, err := CompressStream(pipeWriter, options.Compression) + if err != nil { + return nil, err + } + + go func() { + ta := newTarAppender( + idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps), + compressWriter, + options.ChownOpts, + ) + ta.WhiteoutConverter = getWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData) + ta.CopyPass = options.CopyPass + + defer func() { + // Make sure to check the error on Close. + if err := ta.TarWriter.Close(); err != nil { + logrus.Errorf("Can't close tar writer: %s", err) + } + if err := compressWriter.Close(); err != nil { + logrus.Errorf("Can't close compress writer: %s", err) + } + if err := pipeWriter.Close(); err != nil { + logrus.Errorf("Can't close pipe writer: %s", err) + } + }() + + // this buffer is needed for the duration of this piped stream + defer pools.BufioWriter32KPool.Put(ta.Buffer) + + // In general we log errors here but ignore them because + // during e.g. a diff operation the container can continue + // mutating the filesystem and we can see transient errors + // from this + + stat, err := os.Lstat(srcPath) + if err != nil { + return + } + + if !stat.IsDir() { + // We can't later join a non-dir with any includes because the + // 'walk' will error if "file/." is stat-ed and "file" is not a + // directory. 
So, we must split the source path and use the + // basename as the include. + if len(options.IncludeFiles) > 0 { + logrus.Warn("Tar: Can't archive a file with includes") + } + + dir, base := SplitPathDirEntry(srcPath) + srcPath = dir + options.IncludeFiles = []string{base} + } + + if len(options.IncludeFiles) == 0 { + options.IncludeFiles = []string{"."} + } + + seen := make(map[string]bool) + + for _, include := range options.IncludeFiles { + rebaseName := options.RebaseNames[include] + + walkRoot := getWalkRoot(srcPath, include) + filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error { + if err != nil { + logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err) + return nil + } + + relFilePath, err := filepath.Rel(srcPath, filePath) + if err != nil || (!options.IncludeSourceDir && relFilePath == "." && f.IsDir()) { + // Error getting relative path OR we are looking + // at the source directory path. Skip in both situations. + return nil + } + + if options.IncludeSourceDir && include == "." && relFilePath != "." { + relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator)) + } + + skip := false + + // If "include" is an exact match for the current file + // then even if there's an "excludePatterns" pattern that + // matches it, don't skip it. IOW, assume an explicit 'include' + // is asking for that file no matter what - which is true + // for some files, like .dockerignore and Dockerfile (sometimes) + if include != relFilePath { + skip, err = pm.Matches(relFilePath) + if err != nil { + logrus.Errorf("Error matching %s: %v", relFilePath, err) + return err + } + } + + if skip { + // If we want to skip this file and its a directory + // then we should first check to see if there's an + // excludes pattern (e.g. !dir/file) that starts with this + // dir. If so then we can't skip this dir. + + // Its not a dir then so we can just return/skip. + if !f.IsDir() { + return nil + } + + // No exceptions (!...) in patterns so just skip dir + if !pm.Exclusions() { + return filepath.SkipDir + } + + dirSlash := relFilePath + string(filepath.Separator) + + for _, pat := range pm.Patterns() { + if !pat.Exclusion() { + continue + } + if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) { + // found a match - so can't skip this dir + return nil + } + } + + // No matching exclusion dir so just skip dir + return filepath.SkipDir + } + + if seen[relFilePath] { + return nil + } + seen[relFilePath] = true + + // Rename the base resource. + if rebaseName != "" { + var replacement string + if rebaseName != string(filepath.Separator) { + // Special case the root directory to replace with an + // empty string instead so that we don't end up with + // double slashes in the paths. + replacement = rebaseName + } + + relFilePath = strings.Replace(relFilePath, include, replacement, 1) + } + + if err := ta.addTarFile(filePath, relFilePath); err != nil { + logrus.Errorf("Can't add file %s to tar: %s", filePath, err) + // if pipe is broken, stop writing tar stream to it + if err == io.ErrClosedPipe { + return err + } + } + return nil + }) + } + }() + + return pipeReader, nil +} + +// Unpack unpacks the decompressedArchive to dest with options. 
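+//
+// Entry names are cleaned with filepath.Clean and resolved relative to dest;
+// an entry that would resolve outside dest is rejected with a breakoutError
+// (hardlink and symlink targets get the same check in createTarFile).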
+func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error { + tr := tar.NewReader(decompressedArchive) + trBuf := pools.BufioReader32KPool.Get(nil) + defer pools.BufioReader32KPool.Put(trBuf) + + var dirs []*tar.Header + idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) + rootIDs := idMappings.RootPair() + whiteoutConverter := getWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData) + + // Iterate through the files in the archive. +loop: + for { + hdr, err := tr.Next() + if err == io.EOF { + // end of tar archive + break + } + if err != nil { + return err + } + + // Normalize name, for safety and for a simple is-root check + // This keeps "../" as-is, but normalizes "/../" to "/". Or Windows: + // This keeps "..\" as-is, but normalizes "\..\" to "\". + hdr.Name = filepath.Clean(hdr.Name) + + for _, exclude := range options.ExcludePatterns { + if strings.HasPrefix(hdr.Name, exclude) { + continue loop + } + } + + // After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in + // the filepath format for the OS on which the daemon is running. Hence + // the check for a slash-suffix MUST be done in an OS-agnostic way. + if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { + // Not the root directory, ensure that the parent directory exists + parent := filepath.Dir(hdr.Name) + parentPath := filepath.Join(dest, parent) + if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { + err = idtools.MkdirAllAndChownNew(parentPath, 0777, rootIDs) + if err != nil { + return err + } + } + } + + path := filepath.Join(dest, hdr.Name) + rel, err := filepath.Rel(dest, path) + if err != nil { + return err + } + if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { + return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) + } + + // If path exits we almost always just want to remove and replace it + // The only exception is when it is a directory *and* the file from + // the layer is also a directory. Then we want to merge them (i.e. + // just apply the metadata from the layer). + if fi, err := os.Lstat(path); err == nil { + if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir { + // If NoOverwriteDirNonDir is true then we cannot replace + // an existing directory with a non-directory from the archive. + return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest) + } + + if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir { + // If NoOverwriteDirNonDir is true then we cannot replace + // an existing non-directory with a directory from the archive. + return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest) + } + + if fi.IsDir() && hdr.Name == "." 
{ + continue + } + + if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { + if err := os.RemoveAll(path); err != nil { + return err + } + } + } + trBuf.Reset(tr) + + chownOpts := options.ChownOpts + if err := remapIDs(nil, idMappings, chownOpts, hdr); err != nil { + return err + } + + if whiteoutConverter != nil { + writeFile, err := whiteoutConverter.ConvertRead(hdr, path) + if err != nil { + return err + } + if !writeFile { + continue + } + } + + if chownOpts != nil { + chownOpts = &idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid} + } + + if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, chownOpts, options.InUserNS); err != nil { + return err + } + + // Directory mtimes must be handled at the end to avoid further + // file creation in them to modify the directory mtime + if hdr.Typeflag == tar.TypeDir { + dirs = append(dirs, hdr) + } + } + + for _, hdr := range dirs { + path := filepath.Join(dest, hdr.Name) + + if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { + return err + } + } + return nil +} + +// Untar reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive may be compressed with one of the following algorithms: +// identity (uncompressed), gzip, bzip2, xz. +// FIXME: specify behavior when target path exists vs. doesn't exist. +func Untar(tarArchive io.Reader, dest string, options *TarOptions) error { + return untarHandler(tarArchive, dest, options, true) +} + +// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive must be an uncompressed stream. +func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error { + return untarHandler(tarArchive, dest, options, false) +} + +// Handler for teasing out the automatic decompression +func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error { + if tarArchive == nil { + return fmt.Errorf("Empty archive") + } + dest = filepath.Clean(dest) + if options == nil { + options = &TarOptions{} + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + + r := tarArchive + if decompress { + decompressedArchive, err := DecompressStream(tarArchive) + if err != nil { + return err + } + defer decompressedArchive.Close() + r = decompressedArchive + } + + return Unpack(r, dest, options) +} + +// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. +// If either Tar or Untar fails, TarUntar aborts and returns the error. 
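+//
+// A minimal sketch (the paths here are illustrative only):
+//
+//	archiver := NewDefaultArchiver()
+//	if err := archiver.TarUntar("/tmp/src-tree", "/tmp/dst-tree"); err != nil {
+//		// handle the error
+//	}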
+func (archiver *Archiver) TarUntar(src, dst string) error { + logrus.Debugf("TarUntar(%s %s)", src, dst) + tarMappings := archiver.TarIDMappings + if tarMappings == nil { + tarMappings = &idtools.IDMappings{} + } + options := &TarOptions{ + UIDMaps: tarMappings.UIDs(), + GIDMaps: tarMappings.GIDs(), + Compression: Uncompressed, + CopyPass: true, + InUserNS: rsystem.RunningInUserNS(), + } + archive, err := TarWithOptions(src, options) + if err != nil { + return err + } + defer archive.Close() + untarMappings := archiver.UntarIDMappings + if untarMappings == nil { + untarMappings = &idtools.IDMappings{} + } + options = &TarOptions{ + UIDMaps: untarMappings.UIDs(), + GIDMaps: untarMappings.GIDs(), + ChownOpts: archiver.ChownOpts, + InUserNS: rsystem.RunningInUserNS(), + } + return archiver.Untar(archive, dst, options) +} + +// UntarPath untar a file from path to a destination, src is the source tar file path. +func (archiver *Archiver) UntarPath(src, dst string) error { + archive, err := os.Open(src) + if err != nil { + return err + } + defer archive.Close() + untarMappings := archiver.UntarIDMappings + if untarMappings == nil { + untarMappings = &idtools.IDMappings{} + } + options := &TarOptions{ + UIDMaps: untarMappings.UIDs(), + GIDMaps: untarMappings.GIDs(), + ChownOpts: archiver.ChownOpts, + InUserNS: rsystem.RunningInUserNS(), + } + return archiver.Untar(archive, dst, options) +} + +// CopyWithTar creates a tar archive of filesystem path `src`, and +// unpacks it at filesystem path `dst`. +// The archive is streamed directly with fixed buffering and no +// intermediary disk IO. +func (archiver *Archiver) CopyWithTar(src, dst string) error { + srcSt, err := os.Stat(src) + if err != nil { + return err + } + if !srcSt.IsDir() { + return archiver.CopyFileWithTar(src, dst) + } + + // if this archiver is set up with ID mapping we need to create + // the new destination directory with the remapped root UID/GID pair + // as owner + rootIDs := archiver.UntarIDMappings.RootPair() + if archiver.ChownOpts != nil { + rootIDs = *archiver.ChownOpts + } + // Create dst, copy src's content into it + logrus.Debugf("Creating dest directory: %s", dst) + if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil { + return err + } + logrus.Debugf("Calling TarUntar(%s, %s)", src, dst) + return archiver.TarUntar(src, dst) +} + +// CopyFileWithTar emulates the behavior of the 'cp' command-line +// for a single file. It copies a regular file from path `src` to +// path `dst`, and preserves all its metadata. +func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { + logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst) + srcSt, err := os.Stat(src) + if err != nil { + return err + } + + if srcSt.IsDir() { + return fmt.Errorf("Can't copy a directory") + } + + // Clean up the trailing slash. This must be done in an operating + // system specific manner. 
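+	// A trailing separator means dst names a directory, so copy src into it
+	// under src's own basename.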
+ if dst[len(dst)-1] == os.PathSeparator { + dst = filepath.Join(dst, filepath.Base(src)) + } + // Create the holding directory if necessary + if err := system.MkdirAll(filepath.Dir(dst), 0700, ""); err != nil { + return err + } + + r, w := io.Pipe() + errC := promise.Go(func() error { + defer w.Close() + + srcF, err := os.Open(src) + if err != nil { + return err + } + defer srcF.Close() + + hdr, err := tar.FileInfoHeader(srcSt, "") + if err != nil { + return err + } + hdr.Name = filepath.Base(dst) + hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) + copyPassHeader(hdr) + + if err := remapIDs(archiver.TarIDMappings, nil, archiver.ChownOpts, hdr); err != nil { + return err + } + + tw := tar.NewWriter(w) + defer tw.Close() + if err := tw.WriteHeader(hdr); err != nil { + return err + } + if _, err := io.Copy(tw, srcF); err != nil { + return err + } + return nil + }) + defer func() { + if er := <-errC; err == nil && er != nil { + err = er + } + }() + + options := &TarOptions{ + UIDMaps: archiver.UntarIDMappings.UIDs(), + GIDMaps: archiver.UntarIDMappings.GIDs(), + ChownOpts: archiver.ChownOpts, + InUserNS: rsystem.RunningInUserNS(), + } + err = archiver.Untar(r, filepath.Dir(dst), options) + if err != nil { + r.CloseWithError(err) + } + return err +} + +func remapIDs(readIDMappings, writeIDMappings *idtools.IDMappings, chownOpts *idtools.IDPair, hdr *tar.Header) (err error) { + var uid, gid int + if chownOpts != nil { + uid, gid = chownOpts.UID, chownOpts.GID + } else { + if readIDMappings != nil && !readIDMappings.Empty() { + uid, gid, err = readIDMappings.ToContainer(idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}) + if err != nil { + return err + } + } else { + uid, gid = hdr.Uid, hdr.Gid + } + } + ids := idtools.IDPair{UID: uid, GID: gid} + if writeIDMappings != nil && !writeIDMappings.Empty() { + ids, err = writeIDMappings.ToHost(ids) + if err != nil { + return err + } + } + hdr.Uid, hdr.Gid = ids.UID, ids.GID + return nil +} + +// cmdStream executes a command, and returns its stdout as a stream. +// If the command fails to run or doesn't complete successfully, an error +// will be returned, including anything written on stderr. +func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, <-chan struct{}, error) { + chdone := make(chan struct{}) + cmd.Stdin = input + pipeR, pipeW := io.Pipe() + cmd.Stdout = pipeW + var errBuf bytes.Buffer + cmd.Stderr = &errBuf + + // Run the command and return the pipe + if err := cmd.Start(); err != nil { + return nil, nil, err + } + + // Copy stdout to the returned pipe + go func() { + if err := cmd.Wait(); err != nil { + pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String())) + } else { + pipeW.Close() + } + close(chdone) + }() + + return pipeR, chdone, nil +} + +// NewTempArchive reads the content of src into a temporary file, and returns the contents +// of that file as an archive. The archive can only be read once - as soon as reading completes, +// the file will be deleted. +func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) { + f, err := ioutil.TempFile(dir, "") + if err != nil { + return nil, err + } + if _, err := io.Copy(f, src); err != nil { + return nil, err + } + if _, err := f.Seek(0, 0); err != nil { + return nil, err + } + st, err := f.Stat() + if err != nil { + return nil, err + } + size := st.Size() + return &TempArchive{File: f, Size: size}, nil +} + +// TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes, +// the file will be deleted. 
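+//
+// Read (below) closes and removes the underlying file once Size bytes have
+// been consumed, so a TempArchive must not be reused after it reaches EOF.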
+type TempArchive struct { + *os.File + Size int64 // Pre-computed from Stat().Size() as a convenience + read int64 + closed bool +} + +// Close closes the underlying file if it's still open, or does a no-op +// to allow callers to try to close the TempArchive multiple times safely. +func (archive *TempArchive) Close() error { + if archive.closed { + return nil + } + + archive.closed = true + + return archive.File.Close() +} + +func (archive *TempArchive) Read(data []byte) (int, error) { + n, err := archive.File.Read(data) + archive.read += int64(n) + if err != nil || archive.read == archive.Size { + archive.Close() + os.Remove(archive.File.Name()) + } + return n, err +} + +// IsArchive checks for the magic bytes of a tar or any supported compression +// algorithm. +func IsArchive(header []byte) bool { + compression := DetectCompression(header) + if compression != Uncompressed { + return true + } + r := tar.NewReader(bytes.NewBuffer(header)) + _, err := r.Next() + return err == nil +} + +// UntarPath is a convenience function which looks for an archive +// at filesystem path `src`, and unpacks it at `dst`. +func UntarPath(src, dst string) error { + return NewDefaultArchiver().UntarPath(src, dst) +} + +const ( + // HeaderSize is the size in bytes of a tar header + HeaderSize = 512 +) + +// NewArchiver returns a new Archiver +func NewArchiver(idMappings *idtools.IDMappings) *Archiver { + if idMappings == nil { + idMappings = &idtools.IDMappings{} + } + return &Archiver{Untar: Untar, TarIDMappings: idMappings, UntarIDMappings: idMappings} +} + +// NewArchiverWithChown returns a new Archiver which uses Untar and the provided ID mapping configuration on both ends +func NewArchiverWithChown(tarIDMappings *idtools.IDMappings, chownOpts *idtools.IDPair, untarIDMappings *idtools.IDMappings) *Archiver { + if tarIDMappings == nil { + tarIDMappings = &idtools.IDMappings{} + } + if untarIDMappings == nil { + untarIDMappings = &idtools.IDMappings{} + } + return &Archiver{Untar: Untar, TarIDMappings: tarIDMappings, ChownOpts: chownOpts, UntarIDMappings: untarIDMappings} +} + +// CopyFileWithTarAndChown returns a function which copies a single file from outside +// of any container into our working container, mapping permissions using the +// container's ID maps, possibly overridden using the passed-in chownOpts +func CopyFileWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap []idtools.IDMap, gidmap []idtools.IDMap) func(src, dest string) error { + untarMappings := idtools.NewIDMappingsFromMaps(uidmap, gidmap) + archiver := NewArchiverWithChown(nil, chownOpts, untarMappings) + if hasher != nil { + originalUntar := archiver.Untar + archiver.Untar = func(tarArchive io.Reader, dest string, options *TarOptions) error { + contentReader, contentWriter, err := os.Pipe() + if err != nil { + return errors.Wrapf(err, "error creating pipe extract data to %q", dest) + } + defer contentReader.Close() + defer contentWriter.Close() + var hashError error + var hashWorker sync.WaitGroup + hashWorker.Add(1) + go func() { + t := tar.NewReader(contentReader) + _, err := t.Next() + if err != nil { + hashError = err + } + if _, err = io.Copy(hasher, t); err != nil && err != io.EOF { + hashError = err + } + hashWorker.Done() + }() + if err = originalUntar(io.TeeReader(tarArchive, contentWriter), dest, options); err != nil { + err = errors.Wrapf(err, "error extracting data to %q while copying", dest) + } + hashWorker.Wait() + if err == nil { + err = errors.Wrapf(hashError, "error calculating digest of data for 
%q while copying", dest) + } + return err + } + } + return archiver.CopyFileWithTar +} + +// CopyWithTarAndChown returns a function which copies a directory tree from outside of +// any container into our working container, mapping permissions using the +// container's ID maps, possibly overridden using the passed-in chownOpts +func CopyWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap []idtools.IDMap, gidmap []idtools.IDMap) func(src, dest string) error { + untarMappings := idtools.NewIDMappingsFromMaps(uidmap, gidmap) + archiver := NewArchiverWithChown(nil, chownOpts, untarMappings) + if hasher != nil { + originalUntar := archiver.Untar + archiver.Untar = func(tarArchive io.Reader, dest string, options *TarOptions) error { + return originalUntar(io.TeeReader(tarArchive, hasher), dest, options) + } + } + return archiver.CopyWithTar +} + +// UntarPathAndChown returns a function which extracts an archive in a specified +// location into our working container, mapping permissions using the +// container's ID maps, possibly overridden using the passed-in chownOpts +func UntarPathAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap []idtools.IDMap, gidmap []idtools.IDMap) func(src, dest string) error { + untarMappings := idtools.NewIDMappingsFromMaps(uidmap, gidmap) + archiver := NewArchiverWithChown(nil, chownOpts, untarMappings) + if hasher != nil { + originalUntar := archiver.Untar + archiver.Untar = func(tarArchive io.Reader, dest string, options *TarOptions) error { + return originalUntar(io.TeeReader(tarArchive, hasher), dest, options) + } + } + return archiver.UntarPath +} + +// TarPath returns a function which creates an archive of a specified +// location in the container's filesystem, mapping permissions using the +// container's ID maps +func TarPath(uidmap []idtools.IDMap, gidmap []idtools.IDMap) func(path string) (io.ReadCloser, error) { + tarMappings := idtools.NewIDMappingsFromMaps(uidmap, gidmap) + return func(path string) (io.ReadCloser, error) { + return TarWithOptions(path, &TarOptions{ + Compression: Uncompressed, + UIDMaps: tarMappings.UIDs(), + GIDMaps: tarMappings.GIDs(), + }) + } +} diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_110.go b/vendor/github.com/containers/storage/pkg/archive/archive_110.go new file mode 100644 index 0000000..7bc44a5 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/archive_110.go @@ -0,0 +1,22 @@ +// +build go1.10 + +package archive + +import ( + "archive/tar" + "time" +) + +func copyPassHeader(hdr *tar.Header) { + hdr.Format = tar.FormatPAX +} + +func maybeTruncateHeaderModTime(hdr *tar.Header) { + if hdr.Format == tar.FormatUnknown { + // one of the first things archive/tar does is round this + // value, possibly up, if the format isn't specified, while we + // are much better equipped to handle truncation when scanning + // for changes between source and an extracted copy of this + hdr.ModTime = hdr.ModTime.Truncate(time.Second) + } +} diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_19.go b/vendor/github.com/containers/storage/pkg/archive/archive_19.go new file mode 100644 index 0000000..d19811f --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/archive_19.go @@ -0,0 +1,13 @@ +// +build !go1.10 + +package archive + +import ( + "archive/tar" +) + +func copyPassHeader(hdr *tar.Header) { +} + +func maybeTruncateHeaderModTime(hdr *tar.Header) { +} diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_ffjson.go 
b/vendor/github.com/containers/storage/pkg/archive/archive_ffjson.go new file mode 100644 index 0000000..9b8103e --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/archive_ffjson.go @@ -0,0 +1,2346 @@ +// Code generated by ffjson . DO NOT EDIT. +// source: pkg/archive/archive.go + +package archive + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "github.com/containers/storage/pkg/idtools" + fflib "github.com/pquerna/ffjson/fflib/v1" +) + +// MarshalJSON marshal bytes to json - template +func (j *Archiver) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if j == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := j.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// MarshalJSONBuf marshal buff to json - template +func (j *Archiver) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if j == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"Untar":`) + /* Falling back. type=func(io.Reader, string, *archive.TarOptions) error kind=func */ + err = buf.Encode(j.Untar) + if err != nil { + return err + } + if j.TarIDMappings != nil { + /* Struct fall back. type=idtools.IDMappings kind=struct */ + buf.WriteString(`,"TarIDMappings":`) + err = buf.Encode(j.TarIDMappings) + if err != nil { + return err + } + } else { + buf.WriteString(`,"TarIDMappings":null`) + } + if j.ChownOpts != nil { + /* Struct fall back. type=idtools.IDPair kind=struct */ + buf.WriteString(`,"ChownOpts":`) + err = buf.Encode(j.ChownOpts) + if err != nil { + return err + } + } else { + buf.WriteString(`,"ChownOpts":null`) + } + if j.UntarIDMappings != nil { + /* Struct fall back. type=idtools.IDMappings kind=struct */ + buf.WriteString(`,"UntarIDMappings":`) + err = buf.Encode(j.UntarIDMappings) + if err != nil { + return err + } + } else { + buf.WriteString(`,"UntarIDMappings":null`) + } + buf.WriteByte('}') + return nil +} + +const ( + ffjtArchiverbase = iota + ffjtArchivernosuchkey + + ffjtArchiverUntar + + ffjtArchiverTarIDMappings + + ffjtArchiverChownOpts + + ffjtArchiverUntarIDMappings +) + +var ffjKeyArchiverUntar = []byte("Untar") + +var ffjKeyArchiverTarIDMappings = []byte("TarIDMappings") + +var ffjKeyArchiverChownOpts = []byte("ChownOpts") + +var ffjKeyArchiverUntarIDMappings = []byte("UntarIDMappings") + +// UnmarshalJSON umarshall json - template of ffjson +func (j *Archiver) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +// UnmarshalJSONFFLexer fast json unmarshall - template ffjson +func (j *Archiver) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error + currentKey := ffjtArchiverbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. 
+ if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffjtArchivernosuchkey + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'C': + + if bytes.Equal(ffjKeyArchiverChownOpts, kn) { + currentKey = ffjtArchiverChownOpts + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'T': + + if bytes.Equal(ffjKeyArchiverTarIDMappings, kn) { + currentKey = ffjtArchiverTarIDMappings + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'U': + + if bytes.Equal(ffjKeyArchiverUntar, kn) { + currentKey = ffjtArchiverUntar + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffjKeyArchiverUntarIDMappings, kn) { + currentKey = ffjtArchiverUntarIDMappings + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffjKeyArchiverUntarIDMappings, kn) { + currentKey = ffjtArchiverUntarIDMappings + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffjKeyArchiverChownOpts, kn) { + currentKey = ffjtArchiverChownOpts + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffjKeyArchiverTarIDMappings, kn) { + currentKey = ffjtArchiverTarIDMappings + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffjKeyArchiverUntar, kn) { + currentKey = ffjtArchiverUntar + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffjtArchivernosuchkey + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffjtArchiverUntar: + goto handle_Untar + + case ffjtArchiverTarIDMappings: + goto handle_TarIDMappings + + case ffjtArchiverChownOpts: + goto handle_ChownOpts + + case ffjtArchiverUntarIDMappings: + goto handle_UntarIDMappings + + case ffjtArchivernosuchkey: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Untar: + + /* handler: j.Untar type=func(io.Reader, string, *archive.TarOptions) error kind=func quoted=false*/ + + { + /* Falling back. type=func(io.Reader, string, *archive.TarOptions) error kind=func */ + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = json.Unmarshal(tbuf, &j.Untar) + if err != nil { + return fs.WrapErr(err) + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_TarIDMappings: + + /* handler: j.TarIDMappings type=idtools.IDMappings kind=struct quoted=false*/ + + { + /* Falling back. type=idtools.IDMappings kind=struct */ + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = json.Unmarshal(tbuf, &j.TarIDMappings) + if err != nil { + return fs.WrapErr(err) + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_ChownOpts: + + /* handler: j.ChownOpts type=idtools.IDPair kind=struct quoted=false*/ + + { + /* Falling back. 
type=idtools.IDPair kind=struct */ + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = json.Unmarshal(tbuf, &j.ChownOpts) + if err != nil { + return fs.WrapErr(err) + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_UntarIDMappings: + + /* handler: j.UntarIDMappings type=idtools.IDMappings kind=struct quoted=false*/ + + { + /* Falling back. type=idtools.IDMappings kind=struct */ + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = json.Unmarshal(tbuf, &j.UntarIDMappings) + if err != nil { + return fs.WrapErr(err) + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +// MarshalJSON marshal bytes to json - template +func (j *TarOptions) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if j == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := j.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// MarshalJSONBuf marshal buff to json - template +func (j *TarOptions) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if j == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"IncludeFiles":`) + if j.IncludeFiles != nil { + buf.WriteString(`[`) + for i, v := range j.IncludeFiles { + if i != 0 { + buf.WriteString(`,`) + } + fflib.WriteJsonString(buf, string(v)) + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteString(`,"ExcludePatterns":`) + if j.ExcludePatterns != nil { + buf.WriteString(`[`) + for i, v := range j.ExcludePatterns { + if i != 0 { + buf.WriteString(`,`) + } + fflib.WriteJsonString(buf, string(v)) + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteString(`,"Compression":`) + fflib.FormatBits2(buf, uint64(j.Compression), 10, j.Compression < 0) + if j.NoLchown { + buf.WriteString(`,"NoLchown":true`) + } else { + buf.WriteString(`,"NoLchown":false`) + } + buf.WriteString(`,"UIDMaps":`) + if j.UIDMaps != nil { + buf.WriteString(`[`) + for i, v := range j.UIDMaps { + if i != 0 { + buf.WriteString(`,`) + } + /* Struct fall back. type=idtools.IDMap kind=struct */ + err = buf.Encode(&v) + if err != nil { + return err + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteString(`,"GIDMaps":`) + if j.GIDMaps != nil { + buf.WriteString(`[`) + for i, v := range j.GIDMaps { + if i != 0 { + buf.WriteString(`,`) + } + /* Struct fall back. type=idtools.IDMap kind=struct */ + err = buf.Encode(&v) + if err != nil { + return err + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + if j.ChownOpts != nil { + /* Struct fall back. 
type=idtools.IDPair kind=struct */ + buf.WriteString(`,"ChownOpts":`) + err = buf.Encode(j.ChownOpts) + if err != nil { + return err + } + } else { + buf.WriteString(`,"ChownOpts":null`) + } + if j.IncludeSourceDir { + buf.WriteString(`,"IncludeSourceDir":true`) + } else { + buf.WriteString(`,"IncludeSourceDir":false`) + } + buf.WriteString(`,"WhiteoutFormat":`) + fflib.FormatBits2(buf, uint64(j.WhiteoutFormat), 10, j.WhiteoutFormat < 0) + buf.WriteString(`,"WhiteoutData":`) + /* Interface types must use runtime reflection. type=interface {} kind=interface */ + err = buf.Encode(j.WhiteoutData) + if err != nil { + return err + } + if j.NoOverwriteDirNonDir { + buf.WriteString(`,"NoOverwriteDirNonDir":true`) + } else { + buf.WriteString(`,"NoOverwriteDirNonDir":false`) + } + if j.RebaseNames == nil { + buf.WriteString(`,"RebaseNames":null`) + } else { + buf.WriteString(`,"RebaseNames":{ `) + for key, value := range j.RebaseNames { + fflib.WriteJsonString(buf, key) + buf.WriteString(`:`) + fflib.WriteJsonString(buf, string(value)) + buf.WriteByte(',') + } + buf.Rewind(1) + buf.WriteByte('}') + } + if j.InUserNS { + buf.WriteString(`,"InUserNS":true`) + } else { + buf.WriteString(`,"InUserNS":false`) + } + if j.CopyPass { + buf.WriteString(`,"CopyPass":true`) + } else { + buf.WriteString(`,"CopyPass":false`) + } + buf.WriteByte('}') + return nil +} + +const ( + ffjtTarOptionsbase = iota + ffjtTarOptionsnosuchkey + + ffjtTarOptionsIncludeFiles + + ffjtTarOptionsExcludePatterns + + ffjtTarOptionsCompression + + ffjtTarOptionsNoLchown + + ffjtTarOptionsUIDMaps + + ffjtTarOptionsGIDMaps + + ffjtTarOptionsChownOpts + + ffjtTarOptionsIncludeSourceDir + + ffjtTarOptionsWhiteoutFormat + + ffjtTarOptionsWhiteoutData + + ffjtTarOptionsNoOverwriteDirNonDir + + ffjtTarOptionsRebaseNames + + ffjtTarOptionsInUserNS + + ffjtTarOptionsCopyPass +) + +var ffjKeyTarOptionsIncludeFiles = []byte("IncludeFiles") + +var ffjKeyTarOptionsExcludePatterns = []byte("ExcludePatterns") + +var ffjKeyTarOptionsCompression = []byte("Compression") + +var ffjKeyTarOptionsNoLchown = []byte("NoLchown") + +var ffjKeyTarOptionsUIDMaps = []byte("UIDMaps") + +var ffjKeyTarOptionsGIDMaps = []byte("GIDMaps") + +var ffjKeyTarOptionsChownOpts = []byte("ChownOpts") + +var ffjKeyTarOptionsIncludeSourceDir = []byte("IncludeSourceDir") + +var ffjKeyTarOptionsWhiteoutFormat = []byte("WhiteoutFormat") + +var ffjKeyTarOptionsWhiteoutData = []byte("WhiteoutData") + +var ffjKeyTarOptionsNoOverwriteDirNonDir = []byte("NoOverwriteDirNonDir") + +var ffjKeyTarOptionsRebaseNames = []byte("RebaseNames") + +var ffjKeyTarOptionsInUserNS = []byte("InUserNS") + +var ffjKeyTarOptionsCopyPass = []byte("CopyPass") + +// UnmarshalJSON umarshall json - template of ffjson +func (j *TarOptions) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +// UnmarshalJSONFFLexer fast json unmarshall - template ffjson +func (j *TarOptions) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error + currentKey := ffjtTarOptionsbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + 
case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffjtTarOptionsnosuchkey + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'C': + + if bytes.Equal(ffjKeyTarOptionsCompression, kn) { + currentKey = ffjtTarOptionsCompression + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffjKeyTarOptionsChownOpts, kn) { + currentKey = ffjtTarOptionsChownOpts + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffjKeyTarOptionsCopyPass, kn) { + currentKey = ffjtTarOptionsCopyPass + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'E': + + if bytes.Equal(ffjKeyTarOptionsExcludePatterns, kn) { + currentKey = ffjtTarOptionsExcludePatterns + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'G': + + if bytes.Equal(ffjKeyTarOptionsGIDMaps, kn) { + currentKey = ffjtTarOptionsGIDMaps + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'I': + + if bytes.Equal(ffjKeyTarOptionsIncludeFiles, kn) { + currentKey = ffjtTarOptionsIncludeFiles + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffjKeyTarOptionsIncludeSourceDir, kn) { + currentKey = ffjtTarOptionsIncludeSourceDir + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffjKeyTarOptionsInUserNS, kn) { + currentKey = ffjtTarOptionsInUserNS + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'N': + + if bytes.Equal(ffjKeyTarOptionsNoLchown, kn) { + currentKey = ffjtTarOptionsNoLchown + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffjKeyTarOptionsNoOverwriteDirNonDir, kn) { + currentKey = ffjtTarOptionsNoOverwriteDirNonDir + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'R': + + if bytes.Equal(ffjKeyTarOptionsRebaseNames, kn) { + currentKey = ffjtTarOptionsRebaseNames + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'U': + + if bytes.Equal(ffjKeyTarOptionsUIDMaps, kn) { + currentKey = ffjtTarOptionsUIDMaps + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'W': + + if bytes.Equal(ffjKeyTarOptionsWhiteoutFormat, kn) { + currentKey = ffjtTarOptionsWhiteoutFormat + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffjKeyTarOptionsWhiteoutData, kn) { + currentKey = ffjtTarOptionsWhiteoutData + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffjKeyTarOptionsCopyPass, kn) { + currentKey = ffjtTarOptionsCopyPass + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffjKeyTarOptionsInUserNS, kn) { + currentKey = ffjtTarOptionsInUserNS + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffjKeyTarOptionsRebaseNames, kn) { + currentKey = ffjtTarOptionsRebaseNames + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffjKeyTarOptionsNoOverwriteDirNonDir, kn) { + currentKey = ffjtTarOptionsNoOverwriteDirNonDir + state = fflib.FFParse_want_colon + goto mainparse + } + + if 
fflib.SimpleLetterEqualFold(ffjKeyTarOptionsWhiteoutData, kn) { + currentKey = ffjtTarOptionsWhiteoutData + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffjKeyTarOptionsWhiteoutFormat, kn) { + currentKey = ffjtTarOptionsWhiteoutFormat + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffjKeyTarOptionsIncludeSourceDir, kn) { + currentKey = ffjtTarOptionsIncludeSourceDir + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffjKeyTarOptionsChownOpts, kn) { + currentKey = ffjtTarOptionsChownOpts + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffjKeyTarOptionsGIDMaps, kn) { + currentKey = ffjtTarOptionsGIDMaps + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffjKeyTarOptionsUIDMaps, kn) { + currentKey = ffjtTarOptionsUIDMaps + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffjKeyTarOptionsNoLchown, kn) { + currentKey = ffjtTarOptionsNoLchown + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffjKeyTarOptionsCompression, kn) { + currentKey = ffjtTarOptionsCompression + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffjKeyTarOptionsExcludePatterns, kn) { + currentKey = ffjtTarOptionsExcludePatterns + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffjKeyTarOptionsIncludeFiles, kn) { + currentKey = ffjtTarOptionsIncludeFiles + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffjtTarOptionsnosuchkey + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffjtTarOptionsIncludeFiles: + goto handle_IncludeFiles + + case ffjtTarOptionsExcludePatterns: + goto handle_ExcludePatterns + + case ffjtTarOptionsCompression: + goto handle_Compression + + case ffjtTarOptionsNoLchown: + goto handle_NoLchown + + case ffjtTarOptionsUIDMaps: + goto handle_UIDMaps + + case ffjtTarOptionsGIDMaps: + goto handle_GIDMaps + + case ffjtTarOptionsChownOpts: + goto handle_ChownOpts + + case ffjtTarOptionsIncludeSourceDir: + goto handle_IncludeSourceDir + + case ffjtTarOptionsWhiteoutFormat: + goto handle_WhiteoutFormat + + case ffjtTarOptionsWhiteoutData: + goto handle_WhiteoutData + + case ffjtTarOptionsNoOverwriteDirNonDir: + goto handle_NoOverwriteDirNonDir + + case ffjtTarOptionsRebaseNames: + goto handle_RebaseNames + + case ffjtTarOptionsInUserNS: + goto handle_InUserNS + + case ffjtTarOptionsCopyPass: + goto handle_CopyPass + + case ffjtTarOptionsnosuchkey: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_IncludeFiles: + + /* handler: j.IncludeFiles type=[]string kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + j.IncludeFiles = nil + 
} else { + + j.IncludeFiles = []string{} + + wantVal := true + + for { + + var tmpJIncludeFiles string + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmpJIncludeFiles type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + tmpJIncludeFiles = string(string(outBuf)) + + } + } + + j.IncludeFiles = append(j.IncludeFiles, tmpJIncludeFiles) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_ExcludePatterns: + + /* handler: j.ExcludePatterns type=[]string kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + j.ExcludePatterns = nil + } else { + + j.ExcludePatterns = []string{} + + wantVal := true + + for { + + var tmpJExcludePatterns string + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmpJExcludePatterns type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + tmpJExcludePatterns = string(string(outBuf)) + + } + } + + j.ExcludePatterns = append(j.ExcludePatterns, tmpJExcludePatterns) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Compression: + + /* handler: j.Compression type=archive.Compression kind=int quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for Compression", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + j.Compression = Compression(tval) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_NoLchown: + + /* handler: j.NoLchown type=bool kind=bool quoted=false*/ + + { + if tok != fflib.FFTok_bool && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for bool", tok)) + } + } + + { + if tok == fflib.FFTok_null { + + } else { + tmpb := fs.Output.Bytes() + + if bytes.Compare([]byte{'t', 'r', 'u', 'e'}, tmpb) == 0 { + + j.NoLchown = true + + } else if bytes.Compare([]byte{'f', 'a', 'l', 's', 'e'}, tmpb) == 0 { + + j.NoLchown = false + + } else { + err = errors.New("unexpected bytes for true/false value") + return fs.WrapErr(err) + } + + } 
+ } + + state = fflib.FFParse_after_value + goto mainparse + +handle_UIDMaps: + + /* handler: j.UIDMaps type=[]idtools.IDMap kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + j.UIDMaps = nil + } else { + + j.UIDMaps = []idtools.IDMap{} + + wantVal := true + + for { + + var tmpJUIDMaps idtools.IDMap + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmpJUIDMaps type=idtools.IDMap kind=struct quoted=false*/ + + { + /* Falling back. type=idtools.IDMap kind=struct */ + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = json.Unmarshal(tbuf, &tmpJUIDMaps) + if err != nil { + return fs.WrapErr(err) + } + } + + j.UIDMaps = append(j.UIDMaps, tmpJUIDMaps) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_GIDMaps: + + /* handler: j.GIDMaps type=[]idtools.IDMap kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + j.GIDMaps = nil + } else { + + j.GIDMaps = []idtools.IDMap{} + + wantVal := true + + for { + + var tmpJGIDMaps idtools.IDMap + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmpJGIDMaps type=idtools.IDMap kind=struct quoted=false*/ + + { + /* Falling back. type=idtools.IDMap kind=struct */ + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = json.Unmarshal(tbuf, &tmpJGIDMaps) + if err != nil { + return fs.WrapErr(err) + } + } + + j.GIDMaps = append(j.GIDMaps, tmpJGIDMaps) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_ChownOpts: + + /* handler: j.ChownOpts type=idtools.IDPair kind=struct quoted=false*/ + + { + /* Falling back. 
type=idtools.IDPair kind=struct */ + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = json.Unmarshal(tbuf, &j.ChownOpts) + if err != nil { + return fs.WrapErr(err) + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_IncludeSourceDir: + + /* handler: j.IncludeSourceDir type=bool kind=bool quoted=false*/ + + { + if tok != fflib.FFTok_bool && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for bool", tok)) + } + } + + { + if tok == fflib.FFTok_null { + + } else { + tmpb := fs.Output.Bytes() + + if bytes.Compare([]byte{'t', 'r', 'u', 'e'}, tmpb) == 0 { + + j.IncludeSourceDir = true + + } else if bytes.Compare([]byte{'f', 'a', 'l', 's', 'e'}, tmpb) == 0 { + + j.IncludeSourceDir = false + + } else { + err = errors.New("unexpected bytes for true/false value") + return fs.WrapErr(err) + } + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_WhiteoutFormat: + + /* handler: j.WhiteoutFormat type=archive.WhiteoutFormat kind=int quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for WhiteoutFormat", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + j.WhiteoutFormat = WhiteoutFormat(tval) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_WhiteoutData: + + /* handler: j.WhiteoutData type=interface {} kind=interface quoted=false*/ + + { + /* Falling back. type=interface {} kind=interface */ + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = json.Unmarshal(tbuf, &j.WhiteoutData) + if err != nil { + return fs.WrapErr(err) + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_NoOverwriteDirNonDir: + + /* handler: j.NoOverwriteDirNonDir type=bool kind=bool quoted=false*/ + + { + if tok != fflib.FFTok_bool && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for bool", tok)) + } + } + + { + if tok == fflib.FFTok_null { + + } else { + tmpb := fs.Output.Bytes() + + if bytes.Compare([]byte{'t', 'r', 'u', 'e'}, tmpb) == 0 { + + j.NoOverwriteDirNonDir = true + + } else if bytes.Compare([]byte{'f', 'a', 'l', 's', 'e'}, tmpb) == 0 { + + j.NoOverwriteDirNonDir = false + + } else { + err = errors.New("unexpected bytes for true/false value") + return fs.WrapErr(err) + } + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_RebaseNames: + + /* handler: j.RebaseNames type=map[string]string kind=map quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_bracket && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + j.RebaseNames = nil + } else { + + j.RebaseNames = make(map[string]string, 0) + + wantVal := true + + for { + + var k string + + var tmpJRebaseNames string + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_bracket { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: k type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + k = string(string(outBuf)) + + } + } + + // Expect ':' after key + tok = fs.Scan() + if tok != fflib.FFTok_colon { + return fs.WrapErr(fmt.Errorf("wanted colon token, but got token: %v", tok)) + } + + tok = fs.Scan() + /* handler: tmpJRebaseNames type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + tmpJRebaseNames = string(string(outBuf)) + + } + } + + j.RebaseNames[k] = tmpJRebaseNames + + wantVal = false + } + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_InUserNS: + + /* handler: j.InUserNS type=bool kind=bool quoted=false*/ + + { + if tok != fflib.FFTok_bool && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for bool", tok)) + } + } + + { + if tok == fflib.FFTok_null { + + } else { + tmpb := fs.Output.Bytes() + + if bytes.Compare([]byte{'t', 'r', 'u', 'e'}, tmpb) == 0 { + + j.InUserNS = true + + } else if bytes.Compare([]byte{'f', 'a', 'l', 's', 'e'}, tmpb) == 0 { + + j.InUserNS = false + + } else { + err = errors.New("unexpected bytes for true/false value") + return fs.WrapErr(err) + } + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_CopyPass: + + /* handler: j.CopyPass type=bool kind=bool quoted=false*/ + + { + if tok != fflib.FFTok_bool && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for bool", tok)) + } + } + + { + if tok == fflib.FFTok_null { + + } else { + tmpb := fs.Output.Bytes() + + if bytes.Compare([]byte{'t', 'r', 'u', 'e'}, tmpb) == 0 { + + j.CopyPass = true + + } else if bytes.Compare([]byte{'f', 'a', 'l', 's', 'e'}, tmpb) == 0 { + + j.CopyPass = false + + } else { + err = errors.New("unexpected bytes for true/false value") + return fs.WrapErr(err) + } + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +// MarshalJSON marshal bytes to json - template +func (j *TempArchive) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if j == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := j.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// MarshalJSONBuf marshal buff to json - template +func (j *TempArchive) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if j == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"Size":`) + fflib.FormatBits2(buf, uint64(j.Size), 10, 
j.Size < 0) + buf.WriteByte('}') + return nil +} + +const ( + ffjtTempArchivebase = iota + ffjtTempArchivenosuchkey + + ffjtTempArchiveSize +) + +var ffjKeyTempArchiveSize = []byte("Size") + +// UnmarshalJSON unmarshals JSON - ffjson template +func (j *TempArchive) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +// UnmarshalJSONFFLexer fast JSON unmarshal - ffjson template +func (j *TempArchive) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error + currentKey := ffjtTempArchivebase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffjtTempArchivenosuchkey + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'S': + + if bytes.Equal(ffjKeyTempArchiveSize, kn) { + currentKey = ffjtTempArchiveSize + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffjKeyTempArchiveSize, kn) { + currentKey = ffjtTempArchiveSize + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffjtTempArchivenosuchkey + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffjtTempArchiveSize: + goto handle_Size + + case ffjtTempArchivenosuchkey: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Size: + + /* handler: j.Size type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + j.Size = int64(tval) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { +
return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +// MarshalJSON marshal bytes to json - template +func (j *tarAppender) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if j == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := j.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// MarshalJSONBuf marshal buff to json - template +func (j *tarAppender) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if j == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + if j.TarWriter != nil { + /* Struct fall back. type=tar.Writer kind=struct */ + buf.WriteString(`{"TarWriter":`) + err = buf.Encode(j.TarWriter) + if err != nil { + return err + } + } else { + buf.WriteString(`{"TarWriter":null`) + } + if j.Buffer != nil { + /* Struct fall back. type=bufio.Writer kind=struct */ + buf.WriteString(`,"Buffer":`) + err = buf.Encode(j.Buffer) + if err != nil { + return err + } + } else { + buf.WriteString(`,"Buffer":null`) + } + /* Falling back. type=map[uint64]string kind=map */ + buf.WriteString(`,"SeenFiles":`) + err = buf.Encode(j.SeenFiles) + if err != nil { + return err + } + if j.IDMappings != nil { + /* Struct fall back. type=idtools.IDMappings kind=struct */ + buf.WriteString(`,"IDMappings":`) + err = buf.Encode(j.IDMappings) + if err != nil { + return err + } + } else { + buf.WriteString(`,"IDMappings":null`) + } + if j.ChownOpts != nil { + /* Struct fall back. type=idtools.IDPair kind=struct */ + buf.WriteString(`,"ChownOpts":`) + err = buf.Encode(j.ChownOpts) + if err != nil { + return err + } + } else { + buf.WriteString(`,"ChownOpts":null`) + } + buf.WriteString(`,"WhiteoutConverter":`) + /* Interface types must use runtime reflection. 
type=archive.tarWhiteoutConverter kind=interface */ + err = buf.Encode(j.WhiteoutConverter) + if err != nil { + return err + } + if j.CopyPass { + buf.WriteString(`,"CopyPass":true`) + } else { + buf.WriteString(`,"CopyPass":false`) + } + buf.WriteByte('}') + return nil +} + +const ( + ffjttarAppenderbase = iota + ffjttarAppendernosuchkey + + ffjttarAppenderTarWriter + + ffjttarAppenderBuffer + + ffjttarAppenderSeenFiles + + ffjttarAppenderIDMappings + + ffjttarAppenderChownOpts + + ffjttarAppenderWhiteoutConverter + + ffjttarAppenderCopyPass +) + +var ffjKeytarAppenderTarWriter = []byte("TarWriter") + +var ffjKeytarAppenderBuffer = []byte("Buffer") + +var ffjKeytarAppenderSeenFiles = []byte("SeenFiles") + +var ffjKeytarAppenderIDMappings = []byte("IDMappings") + +var ffjKeytarAppenderChownOpts = []byte("ChownOpts") + +var ffjKeytarAppenderWhiteoutConverter = []byte("WhiteoutConverter") + +var ffjKeytarAppenderCopyPass = []byte("CopyPass") + +// UnmarshalJSON umarshall json - template of ffjson +func (j *tarAppender) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +// UnmarshalJSONFFLexer fast json unmarshall - template ffjson +func (j *tarAppender) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error + currentKey := ffjttarAppenderbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
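+				// An empty key still maps to the nosuchkey sentinel so that its value is consumed and skipped via SkipField below.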
+ currentKey = ffjttarAppendernosuchkey + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'B': + + if bytes.Equal(ffjKeytarAppenderBuffer, kn) { + currentKey = ffjttarAppenderBuffer + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'C': + + if bytes.Equal(ffjKeytarAppenderChownOpts, kn) { + currentKey = ffjttarAppenderChownOpts + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffjKeytarAppenderCopyPass, kn) { + currentKey = ffjttarAppenderCopyPass + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'I': + + if bytes.Equal(ffjKeytarAppenderIDMappings, kn) { + currentKey = ffjttarAppenderIDMappings + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'S': + + if bytes.Equal(ffjKeytarAppenderSeenFiles, kn) { + currentKey = ffjttarAppenderSeenFiles + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'T': + + if bytes.Equal(ffjKeytarAppenderTarWriter, kn) { + currentKey = ffjttarAppenderTarWriter + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'W': + + if bytes.Equal(ffjKeytarAppenderWhiteoutConverter, kn) { + currentKey = ffjttarAppenderWhiteoutConverter + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffjKeytarAppenderCopyPass, kn) { + currentKey = ffjttarAppenderCopyPass + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffjKeytarAppenderWhiteoutConverter, kn) { + currentKey = ffjttarAppenderWhiteoutConverter + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffjKeytarAppenderChownOpts, kn) { + currentKey = ffjttarAppenderChownOpts + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffjKeytarAppenderIDMappings, kn) { + currentKey = ffjttarAppenderIDMappings + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffjKeytarAppenderSeenFiles, kn) { + currentKey = ffjttarAppenderSeenFiles + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffjKeytarAppenderBuffer, kn) { + currentKey = ffjttarAppenderBuffer + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffjKeytarAppenderTarWriter, kn) { + currentKey = ffjttarAppenderTarWriter + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffjttarAppendernosuchkey + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffjttarAppenderTarWriter: + goto handle_TarWriter + + case ffjttarAppenderBuffer: + goto handle_Buffer + + case ffjttarAppenderSeenFiles: + goto handle_SeenFiles + + case ffjttarAppenderIDMappings: + goto handle_IDMappings + + case ffjttarAppenderChownOpts: + goto handle_ChownOpts + + case ffjttarAppenderWhiteoutConverter: + goto handle_WhiteoutConverter + + case ffjttarAppenderCopyPass: + goto handle_CopyPass + + case ffjttarAppendernosuchkey: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto 
wantedvalue + } + } + } + +handle_TarWriter: + + /* handler: j.TarWriter type=tar.Writer kind=struct quoted=false*/ + + { + /* Falling back. type=tar.Writer kind=struct */ + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = json.Unmarshal(tbuf, &j.TarWriter) + if err != nil { + return fs.WrapErr(err) + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Buffer: + + /* handler: j.Buffer type=bufio.Writer kind=struct quoted=false*/ + + { + /* Falling back. type=bufio.Writer kind=struct */ + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = json.Unmarshal(tbuf, &j.Buffer) + if err != nil { + return fs.WrapErr(err) + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_SeenFiles: + + /* handler: j.SeenFiles type=map[uint64]string kind=map quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_bracket && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + j.SeenFiles = nil + } else { + + j.SeenFiles = make(map[uint64]string, 0) + + wantVal := true + + for { + + var k uint64 + + var tmpJSeenFiles string + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_bracket { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: k type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + k = uint64(tval) + + } + } + + // Expect ':' after key + tok = fs.Scan() + if tok != fflib.FFTok_colon { + return fs.WrapErr(fmt.Errorf("wanted colon token, but got token: %v", tok)) + } + + tok = fs.Scan() + /* handler: tmpJSeenFiles type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + tmpJSeenFiles = string(string(outBuf)) + + } + } + + j.SeenFiles[k] = tmpJSeenFiles + + wantVal = false + } + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_IDMappings: + + /* handler: j.IDMappings type=idtools.IDMappings kind=struct quoted=false*/ + + { + /* Falling back. type=idtools.IDMappings kind=struct */ + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = json.Unmarshal(tbuf, &j.IDMappings) + if err != nil { + return fs.WrapErr(err) + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_ChownOpts: + + /* handler: j.ChownOpts type=idtools.IDPair kind=struct quoted=false*/ + + { + /* Falling back. 
type=idtools.IDPair kind=struct */ + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = json.Unmarshal(tbuf, &j.ChownOpts) + if err != nil { + return fs.WrapErr(err) + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_WhiteoutConverter: + + /* handler: j.WhiteoutConverter type=archive.tarWhiteoutConverter kind=interface quoted=false*/ + + { + /* Falling back. type=archive.tarWhiteoutConverter kind=interface */ + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = json.Unmarshal(tbuf, &j.WhiteoutConverter) + if err != nil { + return fs.WrapErr(err) + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_CopyPass: + + /* handler: j.CopyPass type=bool kind=bool quoted=false*/ + + { + if tok != fflib.FFTok_bool && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for bool", tok)) + } + } + + { + if tok == fflib.FFTok_null { + + } else { + tmpb := fs.Output.Bytes() + + if bytes.Compare([]byte{'t', 'r', 'u', 'e'}, tmpb) == 0 { + + j.CopyPass = true + + } else if bytes.Compare([]byte{'f', 'a', 'l', 's', 'e'}, tmpb) == 0 { + + j.CopyPass = false + + } else { + err = errors.New("unexpected bytes for true/false value") + return fs.WrapErr(err) + } + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_linux.go b/vendor/github.com/containers/storage/pkg/archive/archive_linux.go new file mode 100644 index 0000000..5602c7e --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/archive_linux.go @@ -0,0 +1,143 @@ +package archive + +import ( + "archive/tar" + "os" + "path/filepath" + "strings" + "syscall" + + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/system" + "golang.org/x/sys/unix" +) + +func getWhiteoutConverter(format WhiteoutFormat, data interface{}) tarWhiteoutConverter { + if format == OverlayWhiteoutFormat { + if rolayers, ok := data.([]string); ok && len(rolayers) > 0 { + return overlayWhiteoutConverter{rolayers: rolayers} + } + return overlayWhiteoutConverter{rolayers: nil} + } + return nil +} + +type overlayWhiteoutConverter struct { + rolayers []string +} + +func (o overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) (wo *tar.Header, err error) { + // convert whiteouts to AUFS format + if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 { + // we just rename the file and make it normal + dir, filename := filepath.Split(hdr.Name) + hdr.Name = filepath.Join(dir, WhiteoutPrefix+filename) + hdr.Mode = 0600 + hdr.Typeflag = tar.TypeReg + hdr.Size = 0 + } + + if fi.Mode()&os.ModeDir != 0 { + // convert opaque dirs to AUFS format by writing an empty file with the whiteout prefix + opaque, err := system.Lgetxattr(path, "trusted.overlay.opaque") + if err != nil { + return nil, err + } + if len(opaque) == 1 && opaque[0] == 'y' { + if hdr.Xattrs != nil { + delete(hdr.Xattrs, 
"trusted.overlay.opaque") + } + // If there are no lower layers, then it can't have been deleted in this layer. + if len(o.rolayers) == 0 { + return nil, nil + } + // At this point, we have a directory that's opaque. If it appears in one of the lower + // layers, then it was newly-created here, so it wasn't also deleted here. + for _, rolayer := range o.rolayers { + stat, statErr := os.Stat(filepath.Join(rolayer, hdr.Name)) + if statErr != nil && !os.IsNotExist(statErr) && !isENOTDIR(statErr) { + // Not sure what happened here. + return nil, statErr + } + if statErr == nil { + if stat.Mode()&os.ModeCharDevice != 0 { + // It's a whiteout for this directory, so it can't have been + // both deleted and recreated in the layer we're diffing. + s := stat.Sys().(*syscall.Stat_t) + if major(s.Rdev) == 0 && minor(s.Rdev) == 0 { + return nil, nil + } + } + // It's not whiteout, so it was there in the older layer, so we need to + // add a whiteout for this item in this layer. + // create a header for the whiteout file + // it should inherit some properties from the parent, but be a regular file + wo = &tar.Header{ + Typeflag: tar.TypeReg, + Mode: hdr.Mode & int64(os.ModePerm), + Name: filepath.Join(hdr.Name, WhiteoutOpaqueDir), + Size: 0, + Uid: hdr.Uid, + Uname: hdr.Uname, + Gid: hdr.Gid, + Gname: hdr.Gname, + AccessTime: hdr.AccessTime, + ChangeTime: hdr.ChangeTime, + } + break + } + for dir := filepath.Dir(hdr.Name); dir != "" && dir != "." && dir != string(os.PathSeparator); dir = filepath.Dir(dir) { + // Check for whiteout for a parent directory in a parent layer. + stat, statErr := os.Stat(filepath.Join(rolayer, dir)) + if statErr != nil && !os.IsNotExist(statErr) && !isENOTDIR(statErr) { + // Not sure what happened here. + return nil, statErr + } + if statErr == nil { + if stat.Mode()&os.ModeCharDevice != 0 { + // If it's whiteout for a parent directory, then the + // original directory wasn't inherited into this layer, + // so we don't need to emit whiteout for it. 
+ s := stat.Sys().(*syscall.Stat_t) + if major(s.Rdev) == 0 && minor(s.Rdev) == 0 { + return nil, nil + } + } + } + } + } + } + } + + return +} + +func (overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) { + base := filepath.Base(path) + dir := filepath.Dir(path) + + // if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay + if base == WhiteoutOpaqueDir { + err := unix.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0) + // don't write the file itself + return false, err + } + + // if a file was deleted and we are using overlay, we need to create a character device + if strings.HasPrefix(base, WhiteoutPrefix) { + originalBase := base[len(WhiteoutPrefix):] + originalPath := filepath.Join(dir, originalBase) + + if err := unix.Mknod(originalPath, unix.S_IFCHR, 0); err != nil { + return false, err + } + if err := idtools.SafeChown(originalPath, hdr.Uid, hdr.Gid); err != nil { + return false, err + } + + // don't write the file itself + return false, nil + } + + return true, nil +} diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_other.go b/vendor/github.com/containers/storage/pkg/archive/archive_other.go new file mode 100644 index 0000000..585faa8 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/archive_other.go @@ -0,0 +1,7 @@ +// +build !linux + +package archive + +func getWhiteoutConverter(format WhiteoutFormat, data interface{}) tarWhiteoutConverter { + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_unix.go b/vendor/github.com/containers/storage/pkg/archive/archive_unix.go new file mode 100644 index 0000000..bdc1a3d --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/archive_unix.go @@ -0,0 +1,122 @@ +// +build !windows + +package archive + +import ( + "archive/tar" + "errors" + "os" + "path/filepath" + "syscall" + + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/system" + rsystem "github.com/opencontainers/runc/libcontainer/system" + "golang.org/x/sys/unix" +) + +// fixVolumePathPrefix does platform specific processing to ensure that if +// the path being passed in is not in a volume path format, convert it to one. +func fixVolumePathPrefix(srcPath string) string { + return srcPath +} + +// getWalkRoot calculates the root path when performing a TarWithOptions. +// We use a separate function as this is platform specific. On Linux, we +// can't use filepath.Join(srcPath,include) because this will clean away +// a trailing "." or "/" which may be important. +func getWalkRoot(srcPath string, include string) string { + return srcPath + string(filepath.Separator) + include +} + +// CanonicalTarNameForPath returns platform-specific filepath +// to canonical posix-style path for tar archival. p is relative +// path. +func CanonicalTarNameForPath(p string) (string, error) { + return p, nil // already unix-style +} + +// chmodTarEntry is used to adjust the file permissions used in tar header based +// on the platform the archival is done. 
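+// On Unix this is a no-op, since the Go standard library already reports POSIX permission bits as-is.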
+ +func chmodTarEntry(perm os.FileMode) os.FileMode { + return perm // noop for unix as golang APIs provide perm bits correctly +} + +func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) { + s, ok := stat.(*syscall.Stat_t) + + if ok { + // Currently go does not fill in the major/minors + if s.Mode&unix.S_IFBLK != 0 || + s.Mode&unix.S_IFCHR != 0 { + hdr.Devmajor = int64(major(uint64(s.Rdev))) // nolint: unconvert + hdr.Devminor = int64(minor(uint64(s.Rdev))) // nolint: unconvert + } + } + + return +} + +func getInodeFromStat(stat interface{}) (inode uint64, err error) { + s, ok := stat.(*syscall.Stat_t) + + if ok { + inode = s.Ino + } + + return +} + +func getFileUIDGID(stat interface{}) (idtools.IDPair, error) { + s, ok := stat.(*syscall.Stat_t) + + if !ok { + return idtools.IDPair{}, errors.New("cannot convert stat value to syscall.Stat_t") + } + return idtools.IDPair{UID: int(s.Uid), GID: int(s.Gid)}, nil +} + +func major(device uint64) uint64 { + return (device >> 8) & 0xfff +} + +func minor(device uint64) uint64 { + return (device & 0xff) | ((device >> 12) & 0xfff00) +} + +// handleTarTypeBlockCharFifo is an OS-specific helper function used by +// createTarFile to handle the following types of header: Block; Char; Fifo +func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { + if rsystem.RunningInUserNS() { + // cannot create a device if running in user namespace + return nil + } + + mode := uint32(hdr.Mode & 07777) + switch hdr.Typeflag { + case tar.TypeBlock: + mode |= unix.S_IFBLK + case tar.TypeChar: + mode |= unix.S_IFCHR + case tar.TypeFifo: + mode |= unix.S_IFIFO + } + + return system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))) +} + +func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { + if hdr.Typeflag == tar.TypeLink { + if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { + if err := os.Chmod(path, hdrInfo.Mode()); err != nil { + return err + } + } + } else if hdr.Typeflag != tar.TypeSymlink { + if err := os.Chmod(path, hdrInfo.Mode()); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_windows.go b/vendor/github.com/containers/storage/pkg/archive/archive_windows.go new file mode 100644 index 0000000..0bcbb92 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/archive_windows.go @@ -0,0 +1,79 @@ +// +build windows + +package archive + +import ( + "archive/tar" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/longpath" +) + +// fixVolumePathPrefix does platform specific processing to ensure that if +// the path being passed in is not in a volume path format, convert it to one. +func fixVolumePathPrefix(srcPath string) string { + return longpath.AddPrefix(srcPath) +} + +// getWalkRoot calculates the root path when performing a TarWithOptions. +// We use a separate function as this is platform specific. +func getWalkRoot(srcPath string, include string) string { + return filepath.Join(srcPath, include) +} + +// CanonicalTarNameForPath returns platform-specific filepath +// to canonical posix-style path for tar archival. p is relative +// path. +func CanonicalTarNameForPath(p string) (string, error) { + // windows: convert windows style relative path with backslashes + // into forward slashes. 
Since windows does not allow '/' or '\' + // in file names, it is mostly safe to replace however we must + // check just in case + if strings.Contains(p, "/") { + return "", fmt.Errorf("Windows path contains forward slash: %s", p) + } + return strings.Replace(p, string(os.PathSeparator), "/", -1), nil + +} + +// chmodTarEntry is used to adjust the file permissions used in tar header based +// on the platform the archival is done. +func chmodTarEntry(perm os.FileMode) os.FileMode { + //perm &= 0755 // this 0-ed out tar flags (like link, regular file, directory marker etc.) + permPart := perm & os.ModePerm + noPermPart := perm &^ os.ModePerm + // Add the x bit: make everything +x from windows + permPart |= 0111 + permPart &= 0755 + + return noPermPart | permPart +} + +func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) { + // do nothing. no notion of Rdev, Nlink in stat on Windows + return +} + +func getInodeFromStat(stat interface{}) (inode uint64, err error) { + // do nothing. no notion of Inode in stat on Windows + return +} + +// handleTarTypeBlockCharFifo is an OS-specific helper function used by +// createTarFile to handle the following types of header: Block; Char; Fifo +func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { + return nil +} + +func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { + return nil +} + +func getFileUIDGID(stat interface{}) (idtools.IDPair, error) { + // no notion of file ownership mapping yet on Windows + return idtools.IDPair{0, 0}, nil +} diff --git a/vendor/github.com/containers/storage/pkg/archive/changes.go b/vendor/github.com/containers/storage/pkg/archive/changes.go new file mode 100644 index 0000000..d3d6c8f --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/changes.go @@ -0,0 +1,494 @@ +package archive + +import ( + "archive/tar" + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "sort" + "strings" + "syscall" + "time" + + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/pools" + "github.com/containers/storage/pkg/system" + "github.com/sirupsen/logrus" +) + +// ChangeType represents the change type. +type ChangeType int + +const ( + // ChangeModify represents the modify operation. + ChangeModify = iota + // ChangeAdd represents the add operation. + ChangeAdd + // ChangeDelete represents the delete operation. + ChangeDelete +) + +func (c ChangeType) String() string { + switch c { + case ChangeModify: + return "C" + case ChangeAdd: + return "A" + case ChangeDelete: + return "D" + } + return "" +} + +// Change represents a change, it wraps the change type and path. +// It describes changes of the files in the path respect to the +// parent layers. The change could be modify, add, delete. +// This is used for layer diff. 
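+// A modification to /etc/hosts, for instance, would render as "C /etc/hosts" via Change.String below.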
+type Change struct { + Path string + Kind ChangeType +} + +func (change *Change) String() string { + return fmt.Sprintf("%s %s", change.Kind, change.Path) +} + +// for sort.Sort +type changesByPath []Change + +func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path } +func (c changesByPath) Len() int { return len(c) } +func (c changesByPath) Swap(i, j int) { c[j], c[i] = c[i], c[j] } + +// Gnu tar and the go tar writer don't have sub-second mtime +// precision, which is problematic when we apply changes via tar +// files, we handle this by comparing for exact times, *or* same +// second count and either a or b having exactly 0 nanoseconds +func sameFsTime(a, b time.Time) bool { + return a == b || + (a.Unix() == b.Unix() && + (a.Nanosecond() == 0 || b.Nanosecond() == 0)) +} + +func sameFsTimeSpec(a, b syscall.Timespec) bool { + return a.Sec == b.Sec && + (a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0) +} + +// Changes walks the path rw and determines changes for the files in the path, +// with respect to the parent layers +func Changes(layers []string, rw string) ([]Change, error) { + return changes(layers, rw, aufsDeletedFile, aufsMetadataSkip, aufsWhiteoutPresent) +} + +func aufsMetadataSkip(path string) (skip bool, err error) { + skip, err = filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path) + if err != nil { + skip = true + } + return +} + +func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) { + f := filepath.Base(path) + + // If there is a whiteout, then the file was removed + if strings.HasPrefix(f, WhiteoutPrefix) { + originalFile := f[len(WhiteoutPrefix):] + return filepath.Join(filepath.Dir(path), originalFile), nil + } + + return "", nil +} + +func aufsWhiteoutPresent(root, path string) (bool, error) { + f := filepath.Join(filepath.Dir(path), WhiteoutPrefix+filepath.Base(path)) + _, err := os.Stat(filepath.Join(root, f)) + if err == nil { + return true, nil + } + if os.IsNotExist(err) || isENOTDIR(err) { + return false, nil + } + return false, err +} + +func isENOTDIR(err error) bool { + if err == nil { + return false + } + if perror, ok := err.(*os.PathError); ok { + if errno, ok := perror.Err.(syscall.Errno); ok { + return errno == syscall.ENOTDIR + } + } + return false +} + +type skipChange func(string) (bool, error) +type deleteChange func(string, string, os.FileInfo) (string, error) +type whiteoutChange func(string, string) (bool, error) + +func changes(layers []string, rw string, dc deleteChange, sc skipChange, wc whiteoutChange) ([]Change, error) { + var ( + changes []Change + changedDirs = make(map[string]struct{}) + ) + + err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + path, err = filepath.Rel(rw, path) + if err != nil { + return err + } + + // As this runs on the daemon side, file paths are OS specific. 
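+		// Re-anchor the rw-relative path at the separator so it can later be joined directly onto each read-only layer root.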
+ path = filepath.Join(string(os.PathSeparator), path) + + // Skip root + if path == string(os.PathSeparator) { + return nil + } + + if sc != nil { + if skip, err := sc(path); skip { + return err + } + } + + change := Change{ + Path: path, + } + + deletedFile, err := dc(rw, path, f) + if err != nil { + return err + } + + // Find out what kind of modification happened + if deletedFile != "" { + change.Path = deletedFile + change.Kind = ChangeDelete + } else { + // Otherwise, the file was added + change.Kind = ChangeAdd + + // ...Unless it already existed in a top layer, in which case, it's a modification + layerScan: + for _, layer := range layers { + if wc != nil { + // ...Unless a lower layer also had whiteout for this directory or one of its parents, + // in which case, it's new + ignore, err := wc(layer, path) + if err != nil { + return err + } + if ignore { + break layerScan + } + for dir := filepath.Dir(path); dir != "" && dir != string(os.PathSeparator); dir = filepath.Dir(dir) { + ignore, err = wc(layer, dir) + if err != nil { + return err + } + if ignore { + break layerScan + } + } + } + stat, err := os.Stat(filepath.Join(layer, path)) + if err != nil && !os.IsNotExist(err) { + return err + } + if err == nil { + // The file existed in the top layer, so that's a modification + + // However, if it's a directory, maybe it wasn't actually modified. + // If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar + if stat.IsDir() && f.IsDir() { + if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) { + // Both directories are the same, don't record the change + return nil + } + } + change.Kind = ChangeModify + break + } + } + } + + // If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files. + // This block is here to ensure the change is recorded even if the + // modify time, mode and size of the parent directory in the rw and ro layers are all equal. + // Check https://github.com/docker/docker/pull/13590 for details. + if f.IsDir() { + changedDirs[path] = struct{}{} + } + if change.Kind == ChangeAdd || change.Kind == ChangeDelete { + parent := filepath.Dir(path) + tail := []Change{} + for parent != "/" { + if _, ok := changedDirs[parent]; !ok && parent != "/" { + tail = append([]Change{{Path: parent, Kind: ChangeModify}}, tail...) + changedDirs[parent] = struct{}{} + } + parent = filepath.Dir(parent) + } + changes = append(changes, tail...) + } + + // Record change + changes = append(changes, change) + return nil + }) + if err != nil && !os.IsNotExist(err) { + return nil, err + } + return changes, nil +} + +// FileInfo describes the information of a file. +type FileInfo struct { + parent *FileInfo + idMappings *idtools.IDMappings + name string + stat *system.StatT + children map[string]*FileInfo + capability []byte + added bool +} + +// LookUp looks up the file information of a file. +func (info *FileInfo) LookUp(path string) *FileInfo { + // As this runs on the daemon side, file paths are OS specific. + parent := info + if path == string(os.PathSeparator) { + return info + } + + pathElements := strings.Split(path, string(os.PathSeparator)) + for _, elem := range pathElements { + if elem != "" { + child := parent.children[elem] + if child == nil { + return nil + } + parent = child + } + } + return parent +} + +func (info *FileInfo) path() string { + if info.parent == nil { + // As this runs on the daemon side, file paths are OS specific. 
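+		// Base case: the root node has no parent, so its path is the separator; other nodes join parent paths recursively below.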
+ return string(os.PathSeparator) + } + return filepath.Join(info.parent.path(), info.name) +} + +func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { + + sizeAtEntry := len(*changes) + + if oldInfo == nil { + // add + change := Change{ + Path: info.path(), + Kind: ChangeAdd, + } + *changes = append(*changes, change) + info.added = true + } + + // We make a copy so we can modify it to detect additions + // also, we only recurse on the old dir if the new info is a directory + // otherwise any previous delete/change is considered recursive + oldChildren := make(map[string]*FileInfo) + if oldInfo != nil && info.isDir() { + for k, v := range oldInfo.children { + oldChildren[k] = v + } + } + + for name, newChild := range info.children { + oldChild := oldChildren[name] + if oldChild != nil { + // change? + oldStat := oldChild.stat + newStat := newChild.stat + // Note: We can't compare inode or ctime or blocksize here, because these change + // when copying a file into a container. However, that is not generally a problem + // because any content change will change mtime, and any status change should + // be visible when actually comparing the stat fields. The only time this + // breaks down is if some code intentionally hides a change by setting + // back mtime + if statDifferent(oldStat, oldInfo, newStat, info) || + !bytes.Equal(oldChild.capability, newChild.capability) { + change := Change{ + Path: newChild.path(), + Kind: ChangeModify, + } + *changes = append(*changes, change) + newChild.added = true + } + + // Remove from copy so we can detect deletions + delete(oldChildren, name) + } + + newChild.addChanges(oldChild, changes) + } + for _, oldChild := range oldChildren { + // delete + change := Change{ + Path: oldChild.path(), + Kind: ChangeDelete, + } + *changes = append(*changes, change) + } + + // If there were changes inside this directory, we need to add it, even if the directory + // itself wasn't changed. This is needed to properly save and restore filesystem permissions. + // As this runs on the daemon side, file paths are OS specific. + if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) { + change := Change{ + Path: info.path(), + Kind: ChangeModify, + } + // Let's insert the directory entry before the recently added entries located inside this dir + *changes = append(*changes, change) // just to resize the slice, will be overwritten + copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:]) + (*changes)[sizeAtEntry] = change + } + +} + +// Changes add changes to file information. +func (info *FileInfo) Changes(oldInfo *FileInfo) []Change { + var changes []Change + + info.addChanges(oldInfo, &changes) + + return changes +} + +func newRootFileInfo(idMappings *idtools.IDMappings) *FileInfo { + // As this runs on the daemon side, file paths are OS specific. + root := &FileInfo{ + name: string(os.PathSeparator), + idMappings: idMappings, + children: make(map[string]*FileInfo), + } + return root +} + +// ChangesDirs compares two directories and generates an array of Change objects describing the changes. +// If oldDir is "", then all files in newDir will be Add-Changes. 
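+// Illustrative call (directory arguments hypothetical): +//   changes, err := ChangesDirs(newLayerDir, idMappings, oldLayerDir, idMappings)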
+func ChangesDirs(newDir string, newMappings *idtools.IDMappings, oldDir string, oldMappings *idtools.IDMappings) ([]Change, error) { + var ( + oldRoot, newRoot *FileInfo + ) + if oldDir == "" { + emptyDir, err := ioutil.TempDir("", "empty") + if err != nil { + return nil, err + } + defer os.Remove(emptyDir) + oldDir = emptyDir + } + oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir, oldMappings, newMappings) + if err != nil { + return nil, err + } + + return newRoot.Changes(oldRoot), nil +} + +// ChangesSize calculates the size in bytes of the provided changes, based on newDir. +func ChangesSize(newDir string, changes []Change) int64 { + var ( + size int64 + sf = make(map[uint64]struct{}) + ) + for _, change := range changes { + if change.Kind == ChangeModify || change.Kind == ChangeAdd { + file := filepath.Join(newDir, change.Path) + fileInfo, err := os.Lstat(file) + if err != nil { + logrus.Errorf("Can not stat %q: %s", file, err) + continue + } + + if fileInfo != nil && !fileInfo.IsDir() { + if hasHardlinks(fileInfo) { + inode := getIno(fileInfo) + if _, ok := sf[inode]; !ok { + size += fileInfo.Size() + sf[inode] = struct{}{} + } + } else { + size += fileInfo.Size() + } + } + } + } + return size +} + +// ExportChanges produces an Archive from the provided changes, relative to dir. +func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (io.ReadCloser, error) { + reader, writer := io.Pipe() + go func() { + ta := newTarAppender(idtools.NewIDMappingsFromMaps(uidMaps, gidMaps), writer, nil) + + // this buffer is needed for the duration of this piped stream + defer pools.BufioWriter32KPool.Put(ta.Buffer) + + sort.Sort(changesByPath(changes)) + + // In general we log errors here but ignore them because + // during e.g. a diff operation the container can continue + // mutating the filesystem and we can see transient errors + // from this + for _, change := range changes { + if change.Kind == ChangeDelete { + whiteOutDir := filepath.Dir(change.Path) + whiteOutBase := filepath.Base(change.Path) + whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase) + timestamp := time.Now() + hdr := &tar.Header{ + Name: whiteOut[1:], + Size: 0, + ModTime: timestamp, + AccessTime: timestamp, + ChangeTime: timestamp, + } + if err := ta.TarWriter.WriteHeader(hdr); err != nil { + logrus.Debugf("Can't write whiteout header: %s", err) + } + } else { + path := filepath.Join(dir, change.Path) + if err := ta.addTarFile(path, change.Path[1:]); err != nil { + logrus.Debugf("Can't add file %s to tar: %s", path, err) + } + } + } + + // Make sure to check the error on Close. + if err := ta.TarWriter.Close(); err != nil { + logrus.Debugf("Can't close layer: %s", err) + } + if err := writer.Close(); err != nil { + logrus.Debugf("failed close Changes writer: %s", err) + } + }() + return reader, nil +} diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_linux.go b/vendor/github.com/containers/storage/pkg/archive/changes_linux.go new file mode 100644 index 0000000..b123093 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/changes_linux.go @@ -0,0 +1,387 @@ +package archive + +import ( + "bytes" + "fmt" + "os" + "path/filepath" + "sort" + "syscall" + "unsafe" + + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/system" + "golang.org/x/sys/unix" +) + +// walker is used to implement collectFileInfoForChanges on linux. 
Where this
+// method in general returns the entire contents of two directory trees, we
+// optimize some FS calls out on linux. In particular, we take advantage of the
+// fact that getdents(2) returns the inode of each file in the directory being
+// walked, which, when walking two trees in parallel to generate a list of
+// changes, can be used to prune subtrees without ever having to lstat(2) them
+// directly. Eliminating stat calls in this way can save up to seconds on large
+// images.
+type walker struct {
+ dir1 string
+ dir2 string
+ root1 *FileInfo
+ root2 *FileInfo
+ idmap1 *idtools.IDMappings
+ idmap2 *idtools.IDMappings
+}
+
+// collectFileInfoForChanges returns a complete representation of the trees
+// rooted at dir1 and dir2, with one important exception: any subtree or
+// leaf where the inode and device numbers are an exact match between dir1
+// and dir2 will be pruned from the results. This method is *only* to be used
+// to generate a list of changes between the two directories, as it does not
+// reflect the full contents.
+func collectFileInfoForChanges(dir1, dir2 string, idmap1, idmap2 *idtools.IDMappings) (*FileInfo, *FileInfo, error) {
+ w := &walker{
+ dir1: dir1,
+ dir2: dir2,
+ root1: newRootFileInfo(idmap1),
+ root2: newRootFileInfo(idmap2),
+ }
+
+ i1, err := os.Lstat(w.dir1)
+ if err != nil {
+ return nil, nil, err
+ }
+ i2, err := os.Lstat(w.dir2)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ if err := w.walk("/", i1, i2); err != nil {
+ return nil, nil, err
+ }
+
+ return w.root1, w.root2, nil
+}
+
+// Given a FileInfo, its path info, and a reference to the root of the tree
+// being constructed, register this file with the tree.
+func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error {
+ if fi == nil {
+ return nil
+ }
+ parent := root.LookUp(filepath.Dir(path))
+ if parent == nil {
+ return fmt.Errorf("walkchunk: Unexpectedly no parent for %s", path)
+ }
+ info := &FileInfo{
+ name: filepath.Base(path),
+ children: make(map[string]*FileInfo),
+ parent: parent,
+ idMappings: root.idMappings,
+ }
+ cpath := filepath.Join(dir, path)
+ stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t))
+ if err != nil {
+ return err
+ }
+ info.stat = stat
+ info.capability, _ = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access
+ parent.children[info.name] = info
+ return nil
+}
+
+// Walk a subtree rooted at the same path in both trees being iterated. For
+// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d
+func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) {
+ // Register these nodes with the return trees, unless we're still at the
+ // (already-created) roots:
+ if path != "/" {
+ if err := walkchunk(path, i1, w.dir1, w.root1); err != nil {
+ return err
+ }
+ if err := walkchunk(path, i2, w.dir2, w.root2); err != nil {
+ return err
+ }
+ }
+
+ is1Dir := i1 != nil && i1.IsDir()
+ is2Dir := i2 != nil && i2.IsDir()
+
+ sameDevice := false
+ if i1 != nil && i2 != nil {
+ si1 := i1.Sys().(*syscall.Stat_t)
+ si2 := i2.Sys().(*syscall.Stat_t)
+ if si1.Dev == si2.Dev {
+ sameDevice = true
+ }
+ }
+
+ // If these files are both non-existent, or leaves (non-dirs), we are done.
+ if !is1Dir && !is2Dir { + return nil + } + + // Fetch the names of all the files contained in both directories being walked: + var names1, names2 []nameIno + if is1Dir { + names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access + if err != nil { + return err + } + } + if is2Dir { + names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access + if err != nil { + return err + } + } + + // We have lists of the files contained in both parallel directories, sorted + // in the same order. Walk them in parallel, generating a unique merged list + // of all items present in either or both directories. + var names []string + ix1 := 0 + ix2 := 0 + + for { + if ix1 >= len(names1) { + break + } + if ix2 >= len(names2) { + break + } + + ni1 := names1[ix1] + ni2 := names2[ix2] + + switch bytes.Compare([]byte(ni1.name), []byte(ni2.name)) { + case -1: // ni1 < ni2 -- advance ni1 + // we will not encounter ni1 in names2 + names = append(names, ni1.name) + ix1++ + case 0: // ni1 == ni2 + if ni1.ino != ni2.ino || !sameDevice { + names = append(names, ni1.name) + } + ix1++ + ix2++ + case 1: // ni1 > ni2 -- advance ni2 + // we will not encounter ni2 in names1 + names = append(names, ni2.name) + ix2++ + } + } + for ix1 < len(names1) { + names = append(names, names1[ix1].name) + ix1++ + } + for ix2 < len(names2) { + names = append(names, names2[ix2].name) + ix2++ + } + + // For each of the names present in either or both of the directories being + // iterated, stat the name under each root, and recurse the pair of them: + for _, name := range names { + fname := filepath.Join(path, name) + var cInfo1, cInfo2 os.FileInfo + if is1Dir { + cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access + if err != nil && !os.IsNotExist(err) { + return err + } + } + if is2Dir { + cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access + if err != nil && !os.IsNotExist(err) { + return err + } + } + if err = w.walk(fname, cInfo1, cInfo2); err != nil { + return err + } + } + return nil +} + +// {name,inode} pairs used to support the early-pruning logic of the walker type +type nameIno struct { + name string + ino uint64 +} + +type nameInoSlice []nameIno + +func (s nameInoSlice) Len() int { return len(s) } +func (s nameInoSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name } + +// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode +// numbers further up the stack when reading directory contents. Unlike +// os.Readdirnames, which returns a list of filenames, this function returns a +// list of {filename,inode} pairs. +func readdirnames(dirname string) (names []nameIno, err error) { + var ( + size = 100 + buf = make([]byte, 4096) + nbuf int + bufp int + nb int + ) + + f, err := os.Open(dirname) + if err != nil { + return nil, err + } + defer f.Close() + + names = make([]nameIno, 0, size) // Empty with room to grow. 
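+ // The loop below alternates between refilling buf with raw dirent
+ // records via unix.ReadDirent (getdents on linux) and draining the
+ // filled portion with parseDirent, which appends {name,inode} pairs
+ // to names.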
+ for { + // Refill the buffer if necessary + if bufp >= nbuf { + bufp = 0 + nbuf, err = unix.ReadDirent(int(f.Fd()), buf) // getdents on linux + if nbuf < 0 { + nbuf = 0 + } + if err != nil { + return nil, os.NewSyscallError("readdirent", err) + } + if nbuf <= 0 { + break // EOF + } + } + + // Drain the buffer + nb, names = parseDirent(buf[bufp:nbuf], names) + bufp += nb + } + + sl := nameInoSlice(names) + sort.Sort(sl) + return sl, nil +} + +// parseDirent is a minor modification of unix.ParseDirent (linux version) +// which returns {name,inode} pairs instead of just names. +func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) { + origlen := len(buf) + for len(buf) > 0 { + dirent := (*unix.Dirent)(unsafe.Pointer(&buf[0])) + buf = buf[dirent.Reclen:] + if dirent.Ino == 0 { // File absent in directory. + continue + } + bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0])) + var name = string(bytes[0:clen(bytes[:])]) + if name == "." || name == ".." { // Useless names + continue + } + names = append(names, nameIno{name, dirent.Ino}) + } + return origlen - len(buf), names +} + +func clen(n []byte) int { + for i := 0; i < len(n); i++ { + if n[i] == 0 { + return i + } + } + return len(n) +} + +// OverlayChanges walks the path rw and determines changes for the files in the path, +// with respect to the parent layers +func OverlayChanges(layers []string, rw string) ([]Change, error) { + dc := func(root, path string, fi os.FileInfo) (string, error) { + return overlayDeletedFile(layers, root, path, fi) + } + return changes(layers, rw, dc, nil, overlayLowerContainsWhiteout) +} + +func overlayLowerContainsWhiteout(root, path string) (bool, error) { + // Whiteout for a file or directory has the same name, but is for a character + // device with major/minor of 0/0. + stat, err := os.Stat(filepath.Join(root, path)) + if err != nil && !os.IsNotExist(err) && !isENOTDIR(err) { + // Not sure what happened here. + return false, err + } + if err == nil && stat.Mode()&os.ModeCharDevice != 0 { + // Check if there's whiteout for the specified item in the specified layer. + s := stat.Sys().(*syscall.Stat_t) + if major(s.Rdev) == 0 && minor(s.Rdev) == 0 { + return true, nil + } + } + return false, nil +} + +func overlayDeletedFile(layers []string, root, path string, fi os.FileInfo) (string, error) { + // If it's a whiteout item, then a file or directory with that name is removed by this layer. + if fi.Mode()&os.ModeCharDevice != 0 { + s := fi.Sys().(*syscall.Stat_t) + if major(s.Rdev) == 0 && minor(s.Rdev) == 0 { + return path, nil + } + } + // After this we only need to pay attention to directories. + if !fi.IsDir() { + return "", nil + } + // If the directory isn't marked as opaque, then it's just a normal directory. + opaque, err := system.Lgetxattr(filepath.Join(root, path), "trusted.overlay.opaque") + if err != nil { + return "", err + } + if len(opaque) != 1 || opaque[0] != 'y' { + return "", err + } + // If there are no lower layers, then it can't have been deleted and recreated in this layer. + if len(layers) == 0 { + return "", err + } + // At this point, we have a directory that's opaque. If it appears in one of the lower + // layers, then it was newly-created here, so it wasn't also deleted here. + for _, layer := range layers { + stat, err := os.Stat(filepath.Join(layer, path)) + if err != nil && !os.IsNotExist(err) && !isENOTDIR(err) { + // Not sure what happened here. 
+ return "", err + } + if err == nil { + if stat.Mode()&os.ModeCharDevice != 0 { + // It's a whiteout for this directory, so it can't have been + // deleted in this layer. + s := stat.Sys().(*syscall.Stat_t) + if major(s.Rdev) == 0 && minor(s.Rdev) == 0 { + return "", nil + } + } + // It's not whiteout, so it was there in the older layer, so it has to be + // marked as deleted in this layer. + return path, nil + } + for dir := filepath.Dir(path); dir != "" && dir != string(os.PathSeparator); dir = filepath.Dir(dir) { + // Check for whiteout for a parent directory. + stat, err := os.Stat(filepath.Join(layer, dir)) + if err != nil && !os.IsNotExist(err) && !isENOTDIR(err) { + // Not sure what happened here. + return "", err + } + if err == nil { + if stat.Mode()&os.ModeCharDevice != 0 { + // If it's whiteout for a parent directory, then the + // original directory wasn't inherited into the top layer. + s := stat.Sys().(*syscall.Stat_t) + if major(s.Rdev) == 0 && minor(s.Rdev) == 0 { + return "", nil + } + } + } + } + } + + // We didn't find the same path in any older layers, so it was new in this one. + return "", nil + +} diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_other.go b/vendor/github.com/containers/storage/pkg/archive/changes_other.go new file mode 100644 index 0000000..bbbd8c9 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/changes_other.go @@ -0,0 +1,99 @@ +// +build !linux + +package archive + +import ( + "fmt" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/system" +) + +func collectFileInfoForChanges(oldDir, newDir string, oldIDMap, newIDMap *idtools.IDMappings) (*FileInfo, *FileInfo, error) { + var ( + oldRoot, newRoot *FileInfo + err1, err2 error + errs = make(chan error, 2) + ) + go func() { + oldRoot, err1 = collectFileInfo(oldDir, oldIDMap) + errs <- err1 + }() + go func() { + newRoot, err2 = collectFileInfo(newDir, newIDMap) + errs <- err2 + }() + + // block until both routines have returned + for i := 0; i < 2; i++ { + if err := <-errs; err != nil { + return nil, nil, err + } + } + + return oldRoot, newRoot, nil +} + +func collectFileInfo(sourceDir string, idMappings *idtools.IDMappings) (*FileInfo, error) { + root := newRootFileInfo(idMappings) + + err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + relPath, err := filepath.Rel(sourceDir, path) + if err != nil { + return err + } + + // As this runs on the daemon side, file paths are OS specific. + relPath = filepath.Join(string(os.PathSeparator), relPath) + + // See https://github.com/golang/go/issues/9168 - bug in filepath.Join. + // Temporary workaround. If the returned path starts with two backslashes, + // trim it down to a single backslash. Only relevant on Windows. 
+ if runtime.GOOS == "windows" {
+ if strings.HasPrefix(relPath, `\\`) {
+ relPath = relPath[1:]
+ }
+ }
+
+ if relPath == string(os.PathSeparator) {
+ return nil
+ }
+
+ parent := root.LookUp(filepath.Dir(relPath))
+ if parent == nil {
+ return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath)
+ }
+
+ info := &FileInfo{
+ name: filepath.Base(relPath),
+ children: make(map[string]*FileInfo),
+ parent: parent,
+ idMappings: idMappings,
+ }
+
+ s, err := system.Lstat(path)
+ if err != nil {
+ return err
+ }
+ info.stat = s
+
+ info.capability, _ = system.Lgetxattr(path, "security.capability")
+
+ parent.children[info.name] = info
+
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ return root, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_unix.go b/vendor/github.com/containers/storage/pkg/archive/changes_unix.go
new file mode 100644
index 0000000..031ec34
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/archive/changes_unix.go
@@ -0,0 +1,48 @@
+// +build !windows
+
+package archive
+
+import (
+ "os"
+ "syscall"
+
+ "github.com/containers/storage/pkg/idtools"
+ "github.com/containers/storage/pkg/system"
+ "golang.org/x/sys/unix"
+)
+
+func statDifferent(oldStat *system.StatT, oldInfo *FileInfo, newStat *system.StatT, newInfo *FileInfo) bool {
+ // Don't look at size for dirs, it's not a good measure of change
+ oldUid, oldGid := oldStat.UID(), oldStat.GID()
+ uid, gid := newStat.UID(), newStat.GID()
+ if cuid, cgid, err := newInfo.idMappings.ToContainer(idtools.IDPair{UID: int(uid), GID: int(gid)}); err == nil {
+ uid = uint32(cuid)
+ gid = uint32(cgid)
+ if oldcuid, oldcgid, err := oldInfo.idMappings.ToContainer(idtools.IDPair{UID: int(oldUid), GID: int(oldGid)}); err == nil {
+ oldUid = uint32(oldcuid)
+ oldGid = uint32(oldcgid)
+ }
+ }
+ ownerChanged := uid != oldUid || gid != oldGid
+ if oldStat.Mode() != newStat.Mode() ||
+ ownerChanged ||
+ oldStat.Rdev() != newStat.Rdev() ||
+ // Don't look at size for dirs, it's not a good measure of change
+ (oldStat.Mode()&unix.S_IFDIR != unix.S_IFDIR &&
+ (!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) {
+ return true
+ }
+ return false
+}
+
+func (info *FileInfo) isDir() bool {
+ return info.parent == nil || info.stat.Mode()&unix.S_IFDIR != 0
+}
+
+func getIno(fi os.FileInfo) uint64 {
+ return fi.Sys().(*syscall.Stat_t).Ino
+}
+
+func hasHardlinks(fi os.FileInfo) bool {
+ return fi.Sys().(*syscall.Stat_t).Nlink > 1
+}
diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_windows.go b/vendor/github.com/containers/storage/pkg/archive/changes_windows.go
new file mode 100644
index 0000000..966400e
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/archive/changes_windows.go
@@ -0,0 +1,30 @@
+package archive
+
+import (
+ "os"
+
+ "github.com/containers/storage/pkg/system"
+)
+
+func statDifferent(oldStat *system.StatT, oldInfo *FileInfo, newStat *system.StatT, newInfo *FileInfo) bool {
+
+ // Don't look at size for dirs, it's not a good measure of change
+ if oldStat.Mtim() != newStat.Mtim() ||
+ oldStat.Mode() != newStat.Mode() ||
+ oldStat.Size() != newStat.Size() && !oldStat.Mode().IsDir() {
+ return true
+ }
+ return false
+}
+
+func (info *FileInfo) isDir() bool {
+ return info.parent == nil || info.stat.Mode().IsDir()
+}
+
+func getIno(fi os.FileInfo) (inode uint64) {
+ return
+}
+
+func hasHardlinks(fi os.FileInfo) bool {
+ return false
+}
diff --git a/vendor/github.com/containers/storage/pkg/archive/copy.go
b/vendor/github.com/containers/storage/pkg/archive/copy.go new file mode 100644 index 0000000..ea012b2 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/copy.go @@ -0,0 +1,461 @@ +package archive + +import ( + "archive/tar" + "errors" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + + "github.com/containers/storage/pkg/system" + "github.com/sirupsen/logrus" +) + +// Errors used or returned by this file. +var ( + ErrNotDirectory = errors.New("not a directory") + ErrDirNotExists = errors.New("no such directory") + ErrCannotCopyDir = errors.New("cannot copy directory") + ErrInvalidCopySource = errors.New("invalid copy source content") +) + +// PreserveTrailingDotOrSeparator returns the given cleaned path (after +// processing using any utility functions from the path or filepath stdlib +// packages) and appends a trailing `/.` or `/` if its corresponding original +// path (from before being processed by utility functions from the path or +// filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned +// path already ends in a `.` path segment, then another is not added. If the +// clean path already ends in a path separator, then another is not added. +func PreserveTrailingDotOrSeparator(cleanedPath, originalPath string) string { + // Ensure paths are in platform semantics + cleanedPath = normalizePath(cleanedPath) + originalPath = normalizePath(originalPath) + + if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) { + if !hasTrailingPathSeparator(cleanedPath) { + // Add a separator if it doesn't already end with one (a cleaned + // path would only end in a separator if it is the root). + cleanedPath += string(filepath.Separator) + } + cleanedPath += "." + } + + if !hasTrailingPathSeparator(cleanedPath) && hasTrailingPathSeparator(originalPath) { + cleanedPath += string(filepath.Separator) + } + + return cleanedPath +} + +// assertsDirectory returns whether the given path is +// asserted to be a directory, i.e., the path ends with +// a trailing '/' or `/.`, assuming a path separator of `/`. +func assertsDirectory(path string) bool { + return hasTrailingPathSeparator(path) || specifiesCurrentDir(path) +} + +// hasTrailingPathSeparator returns whether the given +// path ends with the system's path separator character. +func hasTrailingPathSeparator(path string) bool { + return len(path) > 0 && os.IsPathSeparator(path[len(path)-1]) +} + +// specifiesCurrentDir returns whether the given path specifies +// a "current directory", i.e., the last path segment is `.`. +func specifiesCurrentDir(path string) bool { + return filepath.Base(path) == "." +} + +// SplitPathDirEntry splits the given path between its directory name and its +// basename by first cleaning the path but preserves a trailing "." if the +// original path specified the current directory. +func SplitPathDirEntry(path string) (dir, base string) { + cleanedPath := filepath.Clean(normalizePath(path)) + + if specifiesCurrentDir(path) { + cleanedPath += string(filepath.Separator) + "." + } + + return filepath.Dir(cleanedPath), filepath.Base(cleanedPath) +} + +// TarResource archives the resource described by the given CopyInfo to a Tar +// archive. A non-nil error is returned if sourcePath does not exist or is +// asserted to be a directory but exists as another type of file. +// +// This function acts as a convenient wrapper around TarWithOptions, which +// requires a directory as the source path. 
TarResource accepts either a +// directory or a file path and correctly sets the Tar options. +func TarResource(sourceInfo CopyInfo) (content io.ReadCloser, err error) { + return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName) +} + +// TarResourceRebase is like TarResource but renames the first path element of +// items in the resulting tar archive to match the given rebaseName if not "". +func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, err error) { + sourcePath = normalizePath(sourcePath) + if _, err = os.Lstat(sourcePath); err != nil { + // Catches the case where the source does not exist or is not a + // directory if asserted to be a directory, as this also causes an + // error. + return + } + + // Separate the source path between its directory and + // the entry in that directory which we are archiving. + sourceDir, sourceBase := SplitPathDirEntry(sourcePath) + + filter := []string{sourceBase} + + logrus.Debugf("copying %q from %q", sourceBase, sourceDir) + + return TarWithOptions(sourceDir, &TarOptions{ + Compression: Uncompressed, + IncludeFiles: filter, + IncludeSourceDir: true, + RebaseNames: map[string]string{ + sourceBase: rebaseName, + }, + }) +} + +// CopyInfo holds basic info about the source +// or destination path of a copy operation. +type CopyInfo struct { + Path string + Exists bool + IsDir bool + RebaseName string +} + +// CopyInfoSourcePath stats the given path to create a CopyInfo +// struct representing that resource for the source of an archive copy +// operation. The given path should be an absolute local path. A source path +// has all symlinks evaluated that appear before the last path separator ("/" +// on Unix). As it is to be a copy source, the path must exist. +func CopyInfoSourcePath(path string, followLink bool) (CopyInfo, error) { + // normalize the file path and then evaluate the symbol link + // we will use the target file instead of the symbol link if + // followLink is set + path = normalizePath(path) + + resolvedPath, rebaseName, err := ResolveHostSourcePath(path, followLink) + if err != nil { + return CopyInfo{}, err + } + + stat, err := os.Lstat(resolvedPath) + if err != nil { + return CopyInfo{}, err + } + + return CopyInfo{ + Path: resolvedPath, + Exists: true, + IsDir: stat.IsDir(), + RebaseName: rebaseName, + }, nil +} + +// CopyInfoDestinationPath stats the given path to create a CopyInfo +// struct representing that resource for the destination of an archive copy +// operation. The given path should be an absolute local path. +func CopyInfoDestinationPath(path string) (info CopyInfo, err error) { + maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot. + path = normalizePath(path) + originalPath := path + + stat, err := os.Lstat(path) + + if err == nil && stat.Mode()&os.ModeSymlink == 0 { + // The path exists and is not a symlink. + return CopyInfo{ + Path: path, + Exists: true, + IsDir: stat.IsDir(), + }, nil + } + + // While the path is a symlink. + for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ { + if n > maxSymlinkIter { + // Don't follow symlinks more than this arbitrary number of times. + return CopyInfo{}, errors.New("too many symlinks in " + originalPath) + } + + // The path is a symbolic link. We need to evaluate it so that the + // destination of the copy operation is the link target and not the + // link itself. This is notably different than CopyInfoSourcePath which + // only evaluates symlinks before the last appearing path separator. 
+ // Also note that it is okay if the last path element is a broken + // symlink as the copy operation should create the target. + var linkTarget string + + linkTarget, err = os.Readlink(path) + if err != nil { + return CopyInfo{}, err + } + + if !system.IsAbs(linkTarget) { + // Join with the parent directory. + dstParent, _ := SplitPathDirEntry(path) + linkTarget = filepath.Join(dstParent, linkTarget) + } + + path = linkTarget + stat, err = os.Lstat(path) + } + + if err != nil { + // It's okay if the destination path doesn't exist. We can still + // continue the copy operation if the parent directory exists. + if !os.IsNotExist(err) { + return CopyInfo{}, err + } + + // Ensure destination parent dir exists. + dstParent, _ := SplitPathDirEntry(path) + + parentDirStat, err := os.Lstat(dstParent) + if err != nil { + return CopyInfo{}, err + } + if !parentDirStat.IsDir() { + return CopyInfo{}, ErrNotDirectory + } + + return CopyInfo{Path: path}, nil + } + + // The path exists after resolving symlinks. + return CopyInfo{ + Path: path, + Exists: true, + IsDir: stat.IsDir(), + }, nil +} + +// PrepareArchiveCopy prepares the given srcContent archive, which should +// contain the archived resource described by srcInfo, to the destination +// described by dstInfo. Returns the possibly modified content archive along +// with the path to the destination directory which it should be extracted to. +func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content io.ReadCloser, err error) { + // Ensure in platform semantics + srcInfo.Path = normalizePath(srcInfo.Path) + dstInfo.Path = normalizePath(dstInfo.Path) + + // Separate the destination path between its directory and base + // components in case the source archive contents need to be rebased. + dstDir, dstBase := SplitPathDirEntry(dstInfo.Path) + _, srcBase := SplitPathDirEntry(srcInfo.Path) + + switch { + case dstInfo.Exists && dstInfo.IsDir: + // The destination exists as a directory. No alteration + // to srcContent is needed as its contents can be + // simply extracted to the destination directory. + return dstInfo.Path, ioutil.NopCloser(srcContent), nil + case dstInfo.Exists && srcInfo.IsDir: + // The destination exists as some type of file and the source + // content is a directory. This is an error condition since + // you cannot copy a directory to an existing file location. + return "", nil, ErrCannotCopyDir + case dstInfo.Exists: + // The destination exists as some type of file and the source content + // is also a file. The source content entry will have to be renamed to + // have a basename which matches the destination path's basename. + if len(srcInfo.RebaseName) != 0 { + srcBase = srcInfo.RebaseName + } + return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil + case srcInfo.IsDir: + // The destination does not exist and the source content is an archive + // of a directory. The archive should be extracted to the parent of + // the destination path instead, and when it is, the directory that is + // created as a result should take the name of the destination path. + // The source content entries will have to be renamed to have a + // basename which matches the destination path's basename. 
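+ // For example (illustration only), copying the directory /src/data to a
+ // non-existent /dst/backup extracts the archive into /dst while rebasing
+ // entries from "data/..." to "backup/...".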
+ if len(srcInfo.RebaseName) != 0 {
+ srcBase = srcInfo.RebaseName
+ }
+ return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
+ case assertsDirectory(dstInfo.Path):
+ // The destination does not exist and is asserted to be created as a
+ // directory, but the source content is not a directory. This is an
+ // error condition since you cannot create a directory from a file
+ // source.
+ return "", nil, ErrDirNotExists
+ default:
+ // The last remaining case is when the destination does not exist, is
+ // not asserted to be a directory, and the source content is not an
+ // archive of a directory. In this case, the destination file will need
+ // to be created when the archive is extracted and the source content
+ // entry will have to be renamed to have a basename which matches the
+ // destination path's basename.
+ if len(srcInfo.RebaseName) != 0 {
+ srcBase = srcInfo.RebaseName
+ }
+ return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
+ }
+
+}
+
+// RebaseArchiveEntries rewrites the given srcContent archive replacing
+// an occurrence of oldBase with newBase at the beginning of entry names.
+func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.ReadCloser {
+ if oldBase == string(os.PathSeparator) {
+ // If oldBase specifies the root directory, use an empty string as
+ // oldBase instead so that newBase doesn't replace the path separator
+ // that all paths will start with.
+ oldBase = ""
+ }
+
+ rebased, w := io.Pipe()
+
+ go func() {
+ srcTar := tar.NewReader(srcContent)
+ rebasedTar := tar.NewWriter(w)
+
+ for {
+ hdr, err := srcTar.Next()
+ if err == io.EOF {
+ // Signals end of archive.
+ rebasedTar.Close()
+ w.Close()
+ return
+ }
+ if err != nil {
+ w.CloseWithError(err)
+ return
+ }
+
+ hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1)
+ if hdr.Typeflag == tar.TypeLink {
+ hdr.Linkname = strings.Replace(hdr.Linkname, oldBase, newBase, 1)
+ }
+
+ if err = rebasedTar.WriteHeader(hdr); err != nil {
+ w.CloseWithError(err)
+ return
+ }
+
+ if _, err = io.Copy(rebasedTar, srcTar); err != nil {
+ w.CloseWithError(err)
+ return
+ }
+ }
+ }()
+
+ return rebased
+}
+
+// CopyResource performs an archive copy from the given source path to the
+// given destination path. The source path MUST exist and the destination
+// path's parent directory must exist.
+func CopyResource(srcPath, dstPath string, followLink bool) error {
+ var (
+ srcInfo CopyInfo
+ err error
+ )
+
+ // Ensure in platform semantics
+ srcPath = normalizePath(srcPath)
+ dstPath = normalizePath(dstPath)
+
+ // Clean the source and destination paths.
+ srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath)
+ dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath)
+
+ if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil {
+ return err
+ }
+
+ content, err := TarResource(srcInfo)
+ if err != nil {
+ return err
+ }
+ defer content.Close()
+
+ return CopyTo(content, srcInfo, dstPath)
+}
+
+// CopyTo handles extracting the given content whose
+// entries should be sourced from srcInfo to dstPath.
+func CopyTo(content io.Reader, srcInfo CopyInfo, dstPath string) error {
+ // The destination path need not exist, but CopyInfoDestinationPath will
+ // ensure that at least the parent directory exists.
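+ // (If the parent itself is missing, CopyInfoDestinationPath returns the
+ // Lstat error; if the parent exists but is not a directory, it returns
+ // ErrNotDirectory.)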
+ dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath))
+ if err != nil {
+ return err
+ }
+
+ dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo)
+ if err != nil {
+ return err
+ }
+ defer copyArchive.Close()
+
+ options := &TarOptions{
+ NoLchown: true,
+ NoOverwriteDirNonDir: true,
+ }
+
+ return Untar(copyArchive, dstDir, options)
+}
+
+// ResolveHostSourcePath decides which real path needs to be copied, depending
+// on whether symlinks should be followed. If followLink is true, resolvedPath
+// is the link target of any symlink in path; otherwise only symlinks in the
+// parent directory are resolved, and a symlink at the base is returned as-is
+// without being resolved.
+func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, err error) {
+ if followLink {
+ resolvedPath, err = filepath.EvalSymlinks(path)
+ if err != nil {
+ return
+ }
+
+ resolvedPath, rebaseName = GetRebaseName(path, resolvedPath)
+ } else {
+ dirPath, basePath := filepath.Split(path)
+
+ // if not following symlinks, resolve only the symlinks in the parent dir
+ var resolvedDirPath string
+ resolvedDirPath, err = filepath.EvalSymlinks(dirPath)
+ if err != nil {
+ return
+ }
+ // resolvedDirPath will have been cleaned (no trailing path separators) so
+ // we can manually join it with the base path element.
+ resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath
+ if hasTrailingPathSeparator(path) && filepath.Base(path) != filepath.Base(resolvedPath) {
+ rebaseName = filepath.Base(path)
+ }
+ }
+ return resolvedPath, rebaseName, nil
+}
+
+// GetRebaseName normalizes and compares path and resolvedPath, and returns
+// the completed resolved path along with the rebased file name, if any.
+func GetRebaseName(path, resolvedPath string) (string, string) {
+ // linkTarget will have been cleaned (no trailing path separators and dot) so
+ // we can manually join it with them
+ var rebaseName string
+ if specifiesCurrentDir(path) && !specifiesCurrentDir(resolvedPath) {
+ resolvedPath += string(filepath.Separator) + "."
+ }
+
+ if hasTrailingPathSeparator(path) && !hasTrailingPathSeparator(resolvedPath) {
+ resolvedPath += string(filepath.Separator)
+ }
+
+ if filepath.Base(path) != filepath.Base(resolvedPath) {
+ // In the case where the path had a trailing separator and a symlink
+ // evaluation has changed the last path component, we will need to
+ // rebase the name in the archive that is being copied to match the
+ // originally requested name.
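+ // For example (illustration only), if path is "/tmp/link/" and the
+ // symlink resolves to "/tmp/target", rebaseName becomes "link" so that
+ // archive entries read "link/..." rather than "target/...".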
+ rebaseName = filepath.Base(path) + } + return resolvedPath, rebaseName +} diff --git a/vendor/github.com/containers/storage/pkg/archive/copy_unix.go b/vendor/github.com/containers/storage/pkg/archive/copy_unix.go new file mode 100644 index 0000000..e305b5e --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/copy_unix.go @@ -0,0 +1,11 @@ +// +build !windows + +package archive + +import ( + "path/filepath" +) + +func normalizePath(path string) string { + return filepath.ToSlash(path) +} diff --git a/vendor/github.com/containers/storage/pkg/archive/copy_windows.go b/vendor/github.com/containers/storage/pkg/archive/copy_windows.go new file mode 100644 index 0000000..2b775b4 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/copy_windows.go @@ -0,0 +1,9 @@ +package archive + +import ( + "path/filepath" +) + +func normalizePath(path string) string { + return filepath.FromSlash(path) +} diff --git a/vendor/github.com/containers/storage/pkg/archive/diff.go b/vendor/github.com/containers/storage/pkg/archive/diff.go new file mode 100644 index 0000000..eb1f2b4 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/diff.go @@ -0,0 +1,256 @@ +package archive + +import ( + "archive/tar" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/pools" + "github.com/containers/storage/pkg/system" + "github.com/sirupsen/logrus" +) + +// UnpackLayer unpack `layer` to a `dest`. The stream `layer` can be +// compressed or uncompressed. +// Returns the size in bytes of the contents of the layer. +func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, err error) { + tr := tar.NewReader(layer) + trBuf := pools.BufioReader32KPool.Get(tr) + defer pools.BufioReader32KPool.Put(trBuf) + + var dirs []*tar.Header + unpackedPaths := make(map[string]struct{}) + + if options == nil { + options = &TarOptions{} + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) + + aufsTempdir := "" + aufsHardlinks := make(map[string]*tar.Header) + + // Iterate through the files in the archive. + for { + hdr, err := tr.Next() + if err == io.EOF { + // end of tar archive + break + } + if err != nil { + return 0, err + } + + size += hdr.Size + + // Normalize name, for safety and for a simple is-root check + hdr.Name = filepath.Clean(hdr.Name) + + // Windows does not support filenames with colons in them. Ignore + // these files. This is not a problem though (although it might + // appear that it is). Let's suppose a client is running docker pull. + // The daemon it points to is Windows. Would it make sense for the + // client to be doing a docker pull Ubuntu for example (which has files + // with colons in the name under /usr/share/man/man3)? No, absolutely + // not as it would really only make sense that they were pulling a + // Windows image. However, for development, it is necessary to be able + // to pull Linux images which are in the repository. + // + // TODO Windows. Once the registry is aware of what images are Windows- + // specific or Linux-specific, this warning should be changed to an error + // to cater for the situation where someone does manage to upload a Linux + // image but have it tagged as Windows inadvertently. 
+ if runtime.GOOS == "windows" {
+ if strings.Contains(hdr.Name, ":") {
+ logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name)
+ continue
+ }
+ }
+
+ // Note as these operations are platform specific, so must the slash be.
+ if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
+ // Not the root directory, ensure that the parent directory exists.
+ // This happened in some tests where an image had a tarfile without any
+ // parent directories.
+ parent := filepath.Dir(hdr.Name)
+ parentPath := filepath.Join(dest, parent)
+
+ if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
+ err = system.MkdirAll(parentPath, 0600, "")
+ if err != nil {
+ return 0, err
+ }
+ }
+ }
+
+ // Skip AUFS metadata dirs
+ if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) {
+ // Regular files inside /.wh..wh.plnk can be used as hardlink targets
+ // We don't want this directory, but we need the files in them so that
+ // such hardlinks can be resolved.
+ if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg {
+ basename := filepath.Base(hdr.Name)
+ aufsHardlinks[basename] = hdr
+ if aufsTempdir == "" {
+ if aufsTempdir, err = ioutil.TempDir("", "storageplnk"); err != nil {
+ return 0, err
+ }
+ defer os.RemoveAll(aufsTempdir)
+ }
+ if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil, options.InUserNS); err != nil {
+ return 0, err
+ }
+ }
+
+ if hdr.Name != WhiteoutOpaqueDir {
+ continue
+ }
+ }
+ path := filepath.Join(dest, hdr.Name)
+ rel, err := filepath.Rel(dest, path)
+ if err != nil {
+ return 0, err
+ }
+
+ // Note as these operations are platform specific, so must the slash be.
+ if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
+ return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
+ }
+ base := filepath.Base(path)
+
+ if strings.HasPrefix(base, WhiteoutPrefix) {
+ dir := filepath.Dir(path)
+ if base == WhiteoutOpaqueDir {
+ _, err := os.Lstat(dir)
+ if err != nil {
+ return 0, err
+ }
+ err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
+ if err != nil {
+ if os.IsNotExist(err) {
+ err = nil // parent was deleted
+ }
+ return err
+ }
+ if path == dir {
+ return nil
+ }
+ if _, exists := unpackedPaths[path]; !exists {
+ err := os.RemoveAll(path)
+ return err
+ }
+ return nil
+ })
+ if err != nil {
+ return 0, err
+ }
+ } else {
+ originalBase := base[len(WhiteoutPrefix):]
+ originalPath := filepath.Join(dir, originalBase)
+ if err := os.RemoveAll(originalPath); err != nil {
+ return 0, err
+ }
+ }
+ } else {
+ // If path exists we almost always just want to remove and replace it.
+ // The only exception is when it is a directory *and* the file from
+ // the layer is also a directory. Then we want to merge them (i.e.
+ // just apply the metadata from the layer).
+ if fi, err := os.Lstat(path); err == nil { + if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { + if err := os.RemoveAll(path); err != nil { + return 0, err + } + } + } + + trBuf.Reset(tr) + srcData := io.Reader(trBuf) + srcHdr := hdr + + // Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so + // we manually retarget these into the temporary files we extracted them into + if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) { + linkBasename := filepath.Base(hdr.Linkname) + srcHdr = aufsHardlinks[linkBasename] + if srcHdr == nil { + return 0, fmt.Errorf("Invalid aufs hardlink") + } + tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename)) + if err != nil { + return 0, err + } + defer tmpFile.Close() + srcData = tmpFile + } + + if err := remapIDs(nil, idMappings, options.ChownOpts, srcHdr); err != nil { + return 0, err + } + + if err := createTarFile(path, dest, srcHdr, srcData, true, nil, options.InUserNS); err != nil { + return 0, err + } + + // Directory mtimes must be handled at the end to avoid further + // file creation in them to modify the directory mtime + if hdr.Typeflag == tar.TypeDir { + dirs = append(dirs, hdr) + } + unpackedPaths[path] = struct{}{} + } + } + + for _, hdr := range dirs { + path := filepath.Join(dest, hdr.Name) + if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { + return 0, err + } + } + + return size, nil +} + +// ApplyLayer parses a diff in the standard layer format from `layer`, +// and applies it to the directory `dest`. The stream `layer` can be +// compressed or uncompressed. +// Returns the size in bytes of the contents of the layer. +func ApplyLayer(dest string, layer io.Reader) (int64, error) { + return applyLayerHandler(dest, layer, &TarOptions{}, true) +} + +// ApplyUncompressedLayer parses a diff in the standard layer format from +// `layer`, and applies it to the directory `dest`. The stream `layer` +// can only be uncompressed. +// Returns the size in bytes of the contents of the layer. 
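+//
+// A minimal usage sketch (illustrative only; the paths are hypothetical):
+//
+//	f, err := os.Open("/tmp/layer.tar")
+//	if err != nil {
+//		return err
+//	}
+//	defer f.Close()
+//	size, err := ApplyUncompressedLayer("/var/lib/myroot", f, &TarOptions{})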
+func ApplyUncompressedLayer(dest string, layer io.Reader, options *TarOptions) (int64, error) { + return applyLayerHandler(dest, layer, options, false) +} + +// do the bulk load of ApplyLayer, but allow for not calling DecompressStream +func applyLayerHandler(dest string, layer io.Reader, options *TarOptions, decompress bool) (int64, error) { + dest = filepath.Clean(dest) + + // We need to be able to set any perms + oldmask, err := system.Umask(0) + if err != nil { + return 0, err + } + defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform + + if decompress { + layer, err = DecompressStream(layer) + if err != nil { + return 0, err + } + } + return UnpackLayer(dest, layer, options) +} diff --git a/vendor/github.com/containers/storage/pkg/archive/time_linux.go b/vendor/github.com/containers/storage/pkg/archive/time_linux.go new file mode 100644 index 0000000..3448569 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/time_linux.go @@ -0,0 +1,16 @@ +package archive + +import ( + "syscall" + "time" +) + +func timeToTimespec(time time.Time) (ts syscall.Timespec) { + if time.IsZero() { + // Return UTIME_OMIT special value + ts.Sec = 0 + ts.Nsec = ((1 << 30) - 2) + return + } + return syscall.NsecToTimespec(time.UnixNano()) +} diff --git a/vendor/github.com/containers/storage/pkg/archive/time_unsupported.go b/vendor/github.com/containers/storage/pkg/archive/time_unsupported.go new file mode 100644 index 0000000..e85aac0 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/time_unsupported.go @@ -0,0 +1,16 @@ +// +build !linux + +package archive + +import ( + "syscall" + "time" +) + +func timeToTimespec(time time.Time) (ts syscall.Timespec) { + nsec := int64(0) + if !time.IsZero() { + nsec = time.UnixNano() + } + return syscall.NsecToTimespec(nsec) +} diff --git a/vendor/github.com/containers/storage/pkg/archive/whiteouts.go b/vendor/github.com/containers/storage/pkg/archive/whiteouts.go new file mode 100644 index 0000000..d20478a --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/whiteouts.go @@ -0,0 +1,23 @@ +package archive + +// Whiteouts are files with a special meaning for the layered filesystem. +// Docker uses AUFS whiteout files inside exported archives. In other +// filesystems these files are generated/handled on tar creation/extraction. + +// WhiteoutPrefix prefix means file is a whiteout. If this is followed by a +// filename this means that file has been removed from the base layer. +const WhiteoutPrefix = ".wh." + +// WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not +// for removing an actual file. Normally these files are excluded from exported +// archives. +const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix + +// WhiteoutLinkDir is a directory AUFS uses for storing hardlink links to other +// layers. Normally these should not go into exported archives and all changed +// hardlinks should be copied to the top layer. +const WhiteoutLinkDir = WhiteoutMetaPrefix + "plnk" + +// WhiteoutOpaqueDir file means directory has been made opaque - meaning +// readdir calls to this directory do not follow to lower layers. 
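+// For example, a layer that deletes /etc/shadow carries an empty entry
+// named /etc/.wh.shadow, and a directory made opaque carries a marker
+// entry named .wh..wh..opq inside it.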
+const WhiteoutOpaqueDir = WhiteoutMetaPrefix + ".opq"
diff --git a/vendor/github.com/containers/storage/pkg/archive/wrap.go b/vendor/github.com/containers/storage/pkg/archive/wrap.go
new file mode 100644
index 0000000..b39d12c
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/archive/wrap.go
@@ -0,0 +1,59 @@
+package archive
+
+import (
+ "archive/tar"
+ "bytes"
+ "io"
+)
+
+// Generate generates a new archive from the content provided
+// as input.
+//
+// `files` is a sequence of path/content pairs. A new file is
+// added to the archive for each pair.
+// If the last pair is incomplete, the file is created with an
+// empty content. For example:
+//
+// Generate("foo.txt", "hello world", "emptyfile")
+//
+// The above call will return an archive with 2 files:
+// * ./foo.txt with content "hello world"
+// * ./emptyfile with empty content
+//
+// FIXME: stream content instead of buffering
+// FIXME: specify permissions and other archive metadata
+func Generate(input ...string) (io.Reader, error) {
+ files := parseStringPairs(input...)
+ buf := new(bytes.Buffer)
+ tw := tar.NewWriter(buf)
+ for _, file := range files {
+ name, content := file[0], file[1]
+ hdr := &tar.Header{
+ Name: name,
+ Size: int64(len(content)),
+ }
+ if err := tw.WriteHeader(hdr); err != nil {
+ return nil, err
+ }
+ if _, err := tw.Write([]byte(content)); err != nil {
+ return nil, err
+ }
+ }
+ if err := tw.Close(); err != nil {
+ return nil, err
+ }
+ return buf, nil
+}
+
+func parseStringPairs(input ...string) (output [][2]string) {
+ output = make([][2]string, 0, len(input)/2+1)
+ for i := 0; i < len(input); i += 2 {
+ var pair [2]string
+ pair[0] = input[i]
+ if i+1 < len(input) {
+ pair[1] = input[i+1]
+ }
+ output = append(output, pair)
+ }
+ return
+}
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go
new file mode 100644
index 0000000..33ba6a1
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go
@@ -0,0 +1,177 @@
+package chrootarchive
+
+import (
+ stdtar "archive/tar"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "sync"
+
+ "github.com/containers/storage/pkg/archive"
+ "github.com/containers/storage/pkg/idtools"
+ rsystem "github.com/opencontainers/runc/libcontainer/system"
+ "github.com/pkg/errors"
+)
+
+// NewArchiver returns a new Archiver which uses chrootarchive.Untar
+func NewArchiver(idMappings *idtools.IDMappings) *archive.Archiver {
+ archiver := archive.NewArchiver(idMappings)
+ archiver.Untar = Untar
+ return archiver
+}
+
+// NewArchiverWithChown returns a new Archiver which uses chrootarchive.Untar and the provided ID mapping configuration on both ends
+func NewArchiverWithChown(tarIDMappings *idtools.IDMappings, chownOpts *idtools.IDPair, untarIDMappings *idtools.IDMappings) *archive.Archiver {
+ archiver := archive.NewArchiverWithChown(tarIDMappings, chownOpts, untarIDMappings)
+ archiver.Untar = Untar
+ return archiver
+}
+
+// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
+// and unpacks it into the directory at `dest`.
+// The archive may be compressed with one of the following algorithms:
+// identity (uncompressed), gzip, bzip2, xz.
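+//
+// A minimal usage sketch (illustrative only; the paths are hypothetical):
+//
+//	f, err := os.Open("/tmp/layer.tar.gz")
+//	if err != nil {
+//		return err
+//	}
+//	defer f.Close()
+//	err = Untar(f, "/var/lib/unpacked", nil)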
+func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
+ return untarHandler(tarArchive, dest, options, true, dest)
+}
+
+// UntarWithRoot is the same as `Untar`, but allows you to pass in a root directory.
+// The root directory is the directory that will be chrooted to.
+// `dest` must be a path within `root`; if it is not, an error will be returned.
+//
+// `root` should be set to a directory which is not controlled by any potentially
+// malicious process.
+//
+// This should be used to prevent a potential attacker from manipulating `dest`
+// such that it would provide access to files outside of `dest` through things
+// like symlinks. Normally `ResolveSymlinksInScope` would handle this, however
+// sanitizing symlinks in this manner is inherently racy:
+// ref: CVE-2018-15664
+func UntarWithRoot(tarArchive io.Reader, dest string, options *archive.TarOptions, root string) error {
+ return untarHandler(tarArchive, dest, options, true, root)
+}
+
+// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive,
+// and unpacks it into the directory at `dest`.
+// The archive must be an uncompressed stream.
+func UntarUncompressed(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
+ return untarHandler(tarArchive, dest, options, false, dest)
+}
+
+// Handler for teasing out the automatic decompression
+func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions, decompress bool, root string) error {
+ if tarArchive == nil {
+ return fmt.Errorf("Empty archive")
+ }
+ if options == nil {
+ options = &archive.TarOptions{}
+ options.InUserNS = rsystem.RunningInUserNS()
+ }
+ if options.ExcludePatterns == nil {
+ options.ExcludePatterns = []string{}
+ }
+
+ idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps)
+ rootIDs := idMappings.RootPair()
+
+ dest = filepath.Clean(dest)
+ if _, err := os.Stat(dest); os.IsNotExist(err) {
+ if err := idtools.MkdirAllAndChownNew(dest, 0755, rootIDs); err != nil {
+ return err
+ }
+ }
+
+ r := ioutil.NopCloser(tarArchive)
+ if decompress {
+ decompressedArchive, err := archive.DecompressStream(tarArchive)
+ if err != nil {
+ return err
+ }
+ defer decompressedArchive.Close()
+ r = decompressedArchive
+ }
+
+ return invokeUnpack(r, dest, options, root)
+}
+
+// Tar tars the requested path while chrooted to the specified root.
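+//
+// A minimal usage sketch (illustrative only; the paths are hypothetical):
+// pack /var/lib/data while chrooted to /var/lib, so that symlinks cannot
+// point outside the root:
+//
+//	rc, err := Tar("/var/lib/data", nil, "/var/lib")
+//	if err != nil {
+//		return err
+//	}
+//	defer rc.Close()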
+func Tar(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) { + if options == nil { + options = &archive.TarOptions{} + } + return invokePack(srcPath, options, root) +} + +// CopyFileWithTarAndChown returns a function which copies a single file from outside +// of any container into our working container, mapping permissions using the +// container's ID maps, possibly overridden using the passed-in chownOpts +func CopyFileWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap []idtools.IDMap, gidmap []idtools.IDMap) func(src, dest string) error { + untarMappings := idtools.NewIDMappingsFromMaps(uidmap, gidmap) + archiver := NewArchiverWithChown(nil, chownOpts, untarMappings) + if hasher != nil { + originalUntar := archiver.Untar + archiver.Untar = func(tarArchive io.Reader, dest string, options *archive.TarOptions) error { + contentReader, contentWriter, err := os.Pipe() + if err != nil { + return errors.Wrapf(err, "error creating pipe extract data to %q", dest) + } + defer contentReader.Close() + defer contentWriter.Close() + var hashError error + var hashWorker sync.WaitGroup + hashWorker.Add(1) + go func() { + t := stdtar.NewReader(contentReader) + _, err := t.Next() + if err != nil { + hashError = err + } + if _, err = io.Copy(hasher, t); err != nil && err != io.EOF { + hashError = err + } + hashWorker.Done() + }() + if err = originalUntar(io.TeeReader(tarArchive, contentWriter), dest, options); err != nil { + err = errors.Wrapf(err, "error extracting data to %q while copying", dest) + } + hashWorker.Wait() + if err == nil { + err = errors.Wrapf(hashError, "error calculating digest of data for %q while copying", dest) + } + return err + } + } + return archiver.CopyFileWithTar +} + +// CopyWithTarAndChown returns a function which copies a directory tree from outside of +// any container into our working container, mapping permissions using the +// container's ID maps, possibly overridden using the passed-in chownOpts +func CopyWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap []idtools.IDMap, gidmap []idtools.IDMap) func(src, dest string) error { + untarMappings := idtools.NewIDMappingsFromMaps(uidmap, gidmap) + archiver := NewArchiverWithChown(nil, chownOpts, untarMappings) + if hasher != nil { + originalUntar := archiver.Untar + archiver.Untar = func(tarArchive io.Reader, dest string, options *archive.TarOptions) error { + return originalUntar(io.TeeReader(tarArchive, hasher), dest, options) + } + } + return archiver.CopyWithTar +} + +// UntarPathAndChown returns a function which extracts an archive in a specified +// location into our working container, mapping permissions using the +// container's ID maps, possibly overridden using the passed-in chownOpts +func UntarPathAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap []idtools.IDMap, gidmap []idtools.IDMap) func(src, dest string) error { + untarMappings := idtools.NewIDMappingsFromMaps(uidmap, gidmap) + archiver := NewArchiverWithChown(nil, chownOpts, untarMappings) + if hasher != nil { + originalUntar := archiver.Untar + archiver.Untar = func(tarArchive io.Reader, dest string, options *archive.TarOptions) error { + return originalUntar(io.TeeReader(tarArchive, hasher), dest, options) + } + } + return archiver.UntarPath +} diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go new file mode 100644 index 0000000..ca9fb10 --- /dev/null +++ 
b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go @@ -0,0 +1,206 @@ +// +build !windows + +package chrootarchive + +import ( + "bytes" + "encoding/json" + "flag" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/reexec" + "github.com/pkg/errors" +) + +// untar is the entry-point for storage-untar on re-exec. This is not used on +// Windows as it does not support chroot, hence no point sandboxing through +// chroot and rexec. +func untar() { + runtime.LockOSThread() + flag.Parse() + + var options archive.TarOptions + + //read the options from the pipe "ExtraFiles" + if err := json.NewDecoder(os.NewFile(3, "options")).Decode(&options); err != nil { + fatal(err) + } + + dst := flag.Arg(0) + var root string + if len(flag.Args()) > 1 { + root = flag.Arg(1) + } + + if root == "" { + root = dst + } + + if err := chroot(root); err != nil { + fatal(err) + } + + if err := archive.Unpack(os.Stdin, dst, &options); err != nil { + fatal(err) + } + // fully consume stdin in case it is zero padded + if _, err := flush(os.Stdin); err != nil { + fatal(err) + } + + os.Exit(0) +} + +func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.TarOptions, root string) error { + if root == "" { + return errors.New("must specify a root to chroot to") + } + + // We can't pass a potentially large exclude list directly via cmd line + // because we easily overrun the kernel's max argument/environment size + // when the full image list is passed (e.g. when this is used by + // `docker load`). We will marshall the options via a pipe to the + // child + r, w, err := os.Pipe() + if err != nil { + return fmt.Errorf("Untar pipe failure: %v", err) + } + + if root != "" { + relDest, err := filepath.Rel(root, dest) + if err != nil { + return err + } + if relDest == "." 
{ + relDest = "/" + } + if relDest[0] != '/' { + relDest = "/" + relDest + } + dest = relDest + } + + cmd := reexec.Command("storage-untar", dest, root) + cmd.Stdin = decompressedArchive + + cmd.ExtraFiles = append(cmd.ExtraFiles, r) + output := bytes.NewBuffer(nil) + cmd.Stdout = output + cmd.Stderr = output + + if err := cmd.Start(); err != nil { + return fmt.Errorf("Untar error on re-exec cmd: %v", err) + } + + //write the options to the pipe for the untar exec to read + if err := json.NewEncoder(w).Encode(options); err != nil { + return fmt.Errorf("Untar json encode to pipe failed: %v", err) + } + w.Close() + + if err := cmd.Wait(); err != nil { + // when `xz -d -c -q | storage-untar ...` failed on storage-untar side, + // we need to exhaust `xz`'s output, otherwise the `xz` side will be + // pending on write pipe forever + io.Copy(ioutil.Discard, decompressedArchive) + + return fmt.Errorf("Error processing tar file(%v): %s", err, output) + } + return nil +} + +func tar() { + runtime.LockOSThread() + flag.Parse() + + src := flag.Arg(0) + var root string + if len(flag.Args()) > 1 { + root = flag.Arg(1) + } + + if root == "" { + root = src + } + + if err := realChroot(root); err != nil { + fatal(err) + } + + var options archive.TarOptions + if err := json.NewDecoder(os.Stdin).Decode(&options); err != nil { + fatal(err) + } + + rdr, err := archive.TarWithOptions(src, &options) + if err != nil { + fatal(err) + } + defer rdr.Close() + + if _, err := io.Copy(os.Stdout, rdr); err != nil { + fatal(err) + } + + os.Exit(0) +} + +func invokePack(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) { + if root == "" { + return nil, errors.New("root path must not be empty") + } + + relSrc, err := filepath.Rel(root, srcPath) + if err != nil { + return nil, err + } + if relSrc == "." 
{ + relSrc = "/" + } + if relSrc[0] != '/' { + relSrc = "/" + relSrc + } + + // make sure we didn't trim a trailing slash with the call to `Rel` + if strings.HasSuffix(srcPath, "/") && !strings.HasSuffix(relSrc, "/") { + relSrc += "/" + } + + cmd := reexec.Command("storage-tar", relSrc, root) + + errBuff := bytes.NewBuffer(nil) + cmd.Stderr = errBuff + + tarR, tarW := io.Pipe() + cmd.Stdout = tarW + + stdin, err := cmd.StdinPipe() + if err != nil { + return nil, errors.Wrap(err, "error getting options pipe for tar process") + } + + if err := cmd.Start(); err != nil { + return nil, errors.Wrap(err, "tar error on re-exec cmd") + } + + go func() { + err := cmd.Wait() + err = errors.Wrapf(err, "error processing tar file: %s", errBuff) + tarW.CloseWithError(err) + }() + + if err := json.NewEncoder(stdin).Encode(options); err != nil { + stdin.Close() + return nil, errors.Wrap(err, "tar json encode to pipe failed") + } + stdin.Close() + + return tarR, nil +} diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_windows.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_windows.go new file mode 100644 index 0000000..8a5c680 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_windows.go @@ -0,0 +1,29 @@ +package chrootarchive + +import ( + "io" + + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/longpath" +) + +// chroot is not supported by Windows +func chroot(path string) error { + return nil +} + +func invokeUnpack(decompressedArchive io.ReadCloser, + dest string, + options *archive.TarOptions, root string) error { + // Windows is different to Linux here because Windows does not support + // chroot. Hence there is no point sandboxing a chrooted process to + // do the unpack. We call inline instead within the daemon process. + return archive.Unpack(decompressedArchive, longpath.AddPrefix(dest), options) +} + +func invokePack(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) { + // Windows is different to Linux here because Windows does not support + // chroot. Hence there is no point sandboxing a chrooted process to + // do the pack. We call inline instead within the daemon process. + return archive.TarWithOptions(srcPath, options) +} diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go new file mode 100644 index 0000000..76c94c6 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go @@ -0,0 +1,114 @@ +package chrootarchive + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + + "github.com/containers/storage/pkg/mount" + "github.com/syndtr/gocapability/capability" + "golang.org/x/sys/unix" +) + +// chroot on linux uses pivot_root instead of chroot +// pivot_root takes a new root and an old root. +// Old root must be a sub-dir of new root, it is where the current rootfs will reside after the call to pivot_root. +// New root is where the new rootfs is set to. +// Old root is removed after the call to pivot_root so it is no longer available under the new root. 
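+// A condensed sketch of the sequence implemented below (error handling and
+// the capability/bind-mount checks elided):
+//   unix.Unshare(unix.CLONE_NEWNS)          // private mount namespace
+//   pivotDir, _ := ioutil.TempDir(path, ".pivot_root")
+//   unix.PivotRoot(path, pivotDir)          // path becomes "/", old root moves to pivotDir
+//   unix.Chdir("/")
+//   unix.Unmount(pivotDir, unix.MNT_DETACH) // drop the old root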
+// This is similar to how libcontainer sets up a container's rootfs +func chroot(path string) (err error) { + caps, err := capability.NewPid(0) + if err != nil { + return err + } + + // if the process doesn't have CAP_SYS_ADMIN, but does have CAP_SYS_CHROOT, we need to use the actual chroot + if !caps.Get(capability.EFFECTIVE, capability.CAP_SYS_ADMIN) && caps.Get(capability.EFFECTIVE, capability.CAP_SYS_CHROOT) { + return realChroot(path) + } + + if err := unix.Unshare(unix.CLONE_NEWNS); err != nil { + return fmt.Errorf("Error creating mount namespace before pivot: %v", err) + } + + // make everything in new ns private + if err := mount.MakeRPrivate("/"); err != nil { + return err + } + + if mounted, _ := mount.Mounted(path); !mounted { + if err := mount.Mount(path, path, "bind", "rbind,rw"); err != nil { + return realChroot(path) + } + } + + // setup oldRoot for pivot_root + pivotDir, err := ioutil.TempDir(path, ".pivot_root") + if err != nil { + return fmt.Errorf("Error setting up pivot dir: %v", err) + } + + var mounted bool + defer func() { + if mounted { + // make sure pivotDir is not mounted before we try to remove it + if errCleanup := unix.Unmount(pivotDir, unix.MNT_DETACH); errCleanup != nil { + if err == nil { + err = errCleanup + } + return + } + } + + errCleanup := os.Remove(pivotDir) + // pivotDir doesn't exist if pivot_root failed and chroot+chdir was successful + // because we already cleaned it up on failed pivot_root + if errCleanup != nil && !os.IsNotExist(errCleanup) { + errCleanup = fmt.Errorf("Error cleaning up after pivot: %v", errCleanup) + if err == nil { + err = errCleanup + } + } + }() + + if err := unix.PivotRoot(path, pivotDir); err != nil { + // If pivot fails, fall back to the normal chroot after cleaning up temp dir + if err := os.Remove(pivotDir); err != nil { + return fmt.Errorf("Error cleaning up after failed pivot: %v", err) + } + return realChroot(path) + } + mounted = true + + // This is the new path for where the old root (prior to the pivot) has been moved to + // This dir contains the rootfs of the caller, which we need to remove so it is not visible during extraction + pivotDir = filepath.Join("/", filepath.Base(pivotDir)) + + if err := unix.Chdir("/"); err != nil { + return fmt.Errorf("Error changing to new root: %v", err) + } + + // Make the pivotDir (where the old root lives) private so it can be unmounted without propagating to the host + if err := unix.Mount("", pivotDir, "", unix.MS_PRIVATE|unix.MS_REC, ""); err != nil { + return fmt.Errorf("Error making old root private after pivot: %v", err) + } + + // Now unmount the old root so it's no longer visible from the new root + if err := unix.Unmount(pivotDir, unix.MNT_DETACH); err != nil { + return fmt.Errorf("Error while unmounting old root after pivot: %v", err) + } + mounted = false + + return nil +} + +func realChroot(path string) error { + if err := unix.Chroot(path); err != nil { + return fmt.Errorf("Error after fallback to chroot: %v", err) + } + if err := unix.Chdir("/"); err != nil { + return fmt.Errorf("Error changing to new root after chroot: %v", err) + } + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go new file mode 100644 index 0000000..83278ee --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go @@ -0,0 +1,16 @@ +// +build !windows,!linux + +package chrootarchive + +import "golang.org/x/sys/unix" + +func realChroot(path 
string) error { + if err := unix.Chroot(path); err != nil { + return err + } + return unix.Chdir("/") +} + +func chroot(path string) error { + return realChroot(path) +} diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/diff.go b/vendor/github.com/containers/storage/pkg/chrootarchive/diff.go new file mode 100644 index 0000000..68b8f74 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/diff.go @@ -0,0 +1,23 @@ +package chrootarchive + +import ( + "io" + + "github.com/containers/storage/pkg/archive" +) + +// ApplyLayer parses a diff in the standard layer format from `layer`, +// and applies it to the directory `dest`. The stream `layer` can only be +// uncompressed. +// Returns the size in bytes of the contents of the layer. +func ApplyLayer(dest string, layer io.Reader) (size int64, err error) { + return applyLayerHandler(dest, layer, &archive.TarOptions{}, true) +} + +// ApplyUncompressedLayer parses a diff in the standard layer format from +// `layer`, and applies it to the directory `dest`. The stream `layer` +// can only be uncompressed. +// Returns the size in bytes of the contents of the layer. +func ApplyUncompressedLayer(dest string, layer io.Reader, options *archive.TarOptions) (int64, error) { + return applyLayerHandler(dest, layer, options, false) +} diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go new file mode 100644 index 0000000..4369f30 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go @@ -0,0 +1,130 @@ +//+build !windows + +package chrootarchive + +import ( + "bytes" + "encoding/json" + "flag" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "runtime" + + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/reexec" + "github.com/containers/storage/pkg/system" + rsystem "github.com/opencontainers/runc/libcontainer/system" +) + +type applyLayerResponse struct { + LayerSize int64 `json:"layerSize"` +} + +// applyLayer is the entry-point for storage-applylayer on re-exec. This is not +// used on Windows as it does not support chroot, hence no point sandboxing +// through chroot and rexec. +func applyLayer() { + + var ( + tmpDir string + err error + options *archive.TarOptions + ) + runtime.LockOSThread() + flag.Parse() + + inUserns := rsystem.RunningInUserNS() + if err := chroot(flag.Arg(0)); err != nil { + fatal(err) + } + + // We need to be able to set any perms + oldmask, err := system.Umask(0) + defer system.Umask(oldmask) + if err != nil { + fatal(err) + } + + if err := json.Unmarshal([]byte(os.Getenv("OPT")), &options); err != nil { + fatal(err) + } + + if inUserns { + options.InUserNS = true + } + + if tmpDir, err = ioutil.TempDir("/", "temp-storage-extract"); err != nil { + fatal(err) + } + + os.Setenv("TMPDIR", tmpDir) + size, err := archive.UnpackLayer("/", os.Stdin, options) + os.RemoveAll(tmpDir) + if err != nil { + fatal(err) + } + + encoder := json.NewEncoder(os.Stdout) + if err := encoder.Encode(applyLayerResponse{size}); err != nil { + fatal(fmt.Errorf("unable to encode layerSize JSON: %s", err)) + } + + if _, err := flush(os.Stdin); err != nil { + fatal(err) + } + + os.Exit(0) +} + +// applyLayerHandler parses a diff in the standard layer format from `layer`, and +// applies it to the directory `dest`. Returns the size in bytes of the +// contents of the layer. 
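+// The work happens in a re-exec'd "storage-applyLayer" child: options travel
+// in the OPT environment variable, the layer streams over stdin, and the
+// child reports the unpacked size back as an applyLayerResponse JSON document
+// on stdout.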
+func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) { + dest = filepath.Clean(dest) + if decompress { + decompressed, err := archive.DecompressStream(layer) + if err != nil { + return 0, err + } + defer decompressed.Close() + + layer = decompressed + } + if options == nil { + options = &archive.TarOptions{} + if rsystem.RunningInUserNS() { + options.InUserNS = true + } + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + + data, err := json.Marshal(options) + if err != nil { + return 0, fmt.Errorf("ApplyLayer json encode: %v", err) + } + + cmd := reexec.Command("storage-applyLayer", dest) + cmd.Stdin = layer + cmd.Env = append(cmd.Env, fmt.Sprintf("OPT=%s", data)) + + outBuf, errBuf := new(bytes.Buffer), new(bytes.Buffer) + cmd.Stdout, cmd.Stderr = outBuf, errBuf + + if err = cmd.Run(); err != nil { + return 0, fmt.Errorf("ApplyLayer %s stdout: %s stderr: %s", err, outBuf, errBuf) + } + + // Stdout should be a valid JSON struct representing an applyLayerResponse. + response := applyLayerResponse{} + decoder := json.NewDecoder(outBuf) + if err = decoder.Decode(&response); err != nil { + return 0, fmt.Errorf("unable to decode ApplyLayer JSON response: %s", err) + } + + return response.LayerSize, nil +} diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_windows.go b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_windows.go new file mode 100644 index 0000000..8f8e88b --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_windows.go @@ -0,0 +1,45 @@ +package chrootarchive + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/longpath" +) + +// applyLayerHandler parses a diff in the standard layer format from `layer`, and +// applies it to the directory `dest`. Returns the size in bytes of the +// contents of the layer. +func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) { + dest = filepath.Clean(dest) + + // Ensure it is a Windows-style volume path + dest = longpath.AddPrefix(dest) + + if decompress { + decompressed, err := archive.DecompressStream(layer) + if err != nil { + return 0, err + } + defer decompressed.Close() + + layer = decompressed + } + + tmpDir, err := ioutil.TempDir(os.Getenv("temp"), "temp-storage-extract") + if err != nil { + return 0, fmt.Errorf("ApplyLayer failed to create temp-storage-extract under %s. 
%s", dest, err) + } + + s, err := archive.UnpackLayer(dest, layer, nil) + os.RemoveAll(tmpDir) + if err != nil { + return 0, fmt.Errorf("ApplyLayer %s failed UnpackLayer to %s: %s", layer, dest, err) + } + + return s, nil +} diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go new file mode 100644 index 0000000..ea08135 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go @@ -0,0 +1,29 @@ +// +build !windows + +package chrootarchive + +import ( + "fmt" + "io" + "io/ioutil" + "os" + + "github.com/containers/storage/pkg/reexec" +) + +func init() { + reexec.Register("storage-applyLayer", applyLayer) + reexec.Register("storage-untar", untar) + reexec.Register("storage-tar", tar) +} + +func fatal(err error) { + fmt.Fprint(os.Stderr, err) + os.Exit(1) +} + +// flush consumes all the bytes from the reader discarding +// any errors +func flush(r io.Reader) (bytes int64, err error) { + return io.Copy(ioutil.Discard, r) +} diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/init_windows.go b/vendor/github.com/containers/storage/pkg/chrootarchive/init_windows.go new file mode 100644 index 0000000..fa17c9b --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/init_windows.go @@ -0,0 +1,4 @@ +package chrootarchive + +func init() { +} diff --git a/vendor/github.com/containers/storage/pkg/config/config.go b/vendor/github.com/containers/storage/pkg/config/config.go new file mode 100644 index 0000000..bdb5fbc --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/config/config.go @@ -0,0 +1,96 @@ +package config + +// ThinpoolOptionsConfig represents the "storage.options.thinpool" +// TOML config table. +type ThinpoolOptionsConfig struct { + // AutoExtendPercent determines the amount by which pool needs to be + // grown. This is specified in terms of % of pool size. So a value of + // 20 means that when threshold is hit, pool will be grown by 20% of + // existing pool size. + AutoExtendPercent string `toml:"autoextend_percent"` + + // AutoExtendThreshold determines the pool extension threshold in terms + // of percentage of pool size. For example, if threshold is 60, that + // means when pool is 60% full, threshold has been hit. + AutoExtendThreshold string `toml:"autoextend_threshold"` + + // BaseSize specifies the size to use when creating the base device, + // which limits the size of images and containers. + BaseSize string `toml:"basesize"` + + // BlockSize specifies a custom blocksize to use for the thin pool. + BlockSize string `toml:"blocksize"` + + // DirectLvmDevice specifies a custom block storage device to use for + // the thin pool. + DirectLvmDevice string `toml:"directlvm_device"` + + // DirectLvmDeviceForcewipes device even if device already has a + // filesystem + DirectLvmDeviceForce string `toml:"directlvm_device_force"` + + // Fs specifies the filesystem type to use for the base device. + Fs string `toml:"fs"` + + // log_level sets the log level of devicemapper. + LogLevel string `toml:"log_level"` + + // MinFreeSpace specifies the min free space percent in a thin pool + // require for new device creation to + MinFreeSpace string `toml:"min_free_space"` + + // MkfsArg specifies extra mkfs arguments to be used when creating the + // basedevice. + MkfsArg string `toml:"mkfsarg"` + + // MountOpt specifies extra mount options used when mounting the thin + // devices. 
+ MountOpt string `toml:"mountopt"` + + // UseDeferredDeletion marks device for deferred deletion + UseDeferredDeletion string `toml:"use_deferred_deletion"` + + // UseDeferredRemoval marks device for deferred removal + UseDeferredRemoval string `toml:"use_deferred_removal"` + + // XfsNoSpaceMaxRetriesFreeSpace specifies the maximum number of + // retries XFS should attempt to complete IO when ENOSPC (no space) + // error is returned by underlying storage device. + XfsNoSpaceMaxRetries string `toml:"xfs_nospace_max_retries"` +} + +// OptionsConfig represents the "storage.options" TOML config table. +type OptionsConfig struct { + // AdditionalImagesStores is the location of additional read/only + // Image stores. Usually used to access Networked File System + // for shared image content + AdditionalImageStores []string `toml:"additionalimagestores"` + + // Size + Size string `toml:"size"` + + // RemapUIDs is a list of default UID mappings to use for layers. + RemapUIDs string `toml:"remap-uids"` + // RemapGIDs is a list of default GID mappings to use for layers. + RemapGIDs string `toml:"remap-gids"` + + // RemapUser is the name of one or more entries in /etc/subuid which + // should be used to set up default UID mappings. + RemapUser string `toml:"remap-user"` + // RemapGroup is the name of one or more entries in /etc/subgid which + // should be used to set up default GID mappings. + RemapGroup string `toml:"remap-group"` + // Thinpool container options to be handed to thinpool drivers + Thinpool struct{ ThinpoolOptionsConfig } `toml:"thinpool"` + // OSTree repository + OstreeRepo string `toml:"ostree_repo"` + + // Do not create a bind mount on the storage home + SkipMountHome string `toml:"skip_mount_home"` + + // Alternative program to use for the mount of the file system + MountProgram string `toml:"mount_program"` + + // MountOpt specifies extra mount options used when mounting + MountOpt string `toml:"mountopt"` +} diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go new file mode 100644 index 0000000..6a0ac24 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go @@ -0,0 +1,821 @@ +// +build linux,cgo + +package devicemapper + +import ( + "errors" + "fmt" + "os" + "runtime" + "unsafe" + + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +// Same as DM_DEVICE_* enum values from libdevmapper.h +// nolint: deadcode +const ( + deviceCreate TaskType = iota + deviceReload + deviceRemove + deviceRemoveAll + deviceSuspend + deviceResume + deviceInfo + deviceDeps + deviceRename + deviceVersion + deviceStatus + deviceTable + deviceWaitevent + deviceList + deviceClear + deviceMknodes + deviceListVersions + deviceTargetMsg + deviceSetGeometry +) + +const ( + addNodeOnResume AddNodeType = iota + addNodeOnCreate +) + +// List of errors returned when using devicemapper. 
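+// The wrapped libdm calls only report success or failure (1 or 0), so each
+// failing wrapper is mapped to one of these sentinel errors rather than a
+// detailed errno.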
+var ( + ErrTaskRun = errors.New("dm_task_run failed") + ErrTaskSetName = errors.New("dm_task_set_name failed") + ErrTaskSetMessage = errors.New("dm_task_set_message failed") + ErrTaskSetAddNode = errors.New("dm_task_set_add_node failed") + ErrTaskSetRo = errors.New("dm_task_set_ro failed") + ErrTaskAddTarget = errors.New("dm_task_add_target failed") + ErrTaskSetSector = errors.New("dm_task_set_sector failed") + ErrTaskGetDeps = errors.New("dm_task_get_deps failed") + ErrTaskGetInfo = errors.New("dm_task_get_info failed") + ErrTaskGetDriverVersion = errors.New("dm_task_get_driver_version failed") + ErrTaskDeferredRemove = errors.New("dm_task_deferred_remove failed") + ErrTaskSetCookie = errors.New("dm_task_set_cookie failed") + ErrNilCookie = errors.New("cookie ptr can't be nil") + ErrGetBlockSize = errors.New("Can't get block size") + ErrUdevWait = errors.New("wait on udev cookie failed") + ErrSetDevDir = errors.New("dm_set_dev_dir failed") + ErrGetLibraryVersion = errors.New("dm_get_library_version failed") + ErrCreateRemoveTask = errors.New("Can't create task of type deviceRemove") + ErrRunRemoveDevice = errors.New("running RemoveDevice failed") + ErrInvalidAddNode = errors.New("Invalid AddNode type") + ErrBusy = errors.New("Device is Busy") + ErrDeviceIDExists = errors.New("Device Id Exists") + ErrEnxio = errors.New("No such device or address") +) + +var ( + dmSawBusy bool + dmSawExist bool + dmSawEnxio bool // No Such Device or Address +) + +type ( + // Task represents a devicemapper task (like lvcreate, etc.) ; a task is needed for each ioctl + // command to execute. + Task struct { + unmanaged *cdmTask + } + // Deps represents dependents (layer) of a device. + Deps struct { + Count uint32 + Filler uint32 + Device []uint64 + } + // Info represents information about a device. + Info struct { + Exists int + Suspended int + LiveTable int + InactiveTable int + OpenCount int32 + EventNr uint32 + Major uint32 + Minor uint32 + ReadOnly int + TargetCount int32 + DeferredRemove int + } + // TaskType represents a type of task + TaskType int + // AddNodeType represents a type of node to be added + AddNodeType int +) + +// DeviceIDExists returns whether error conveys the information about device Id already +// exist or not. This will be true if device creation or snap creation +// operation fails if device or snap device already exists in pool. +// Current implementation is little crude as it scans the error string +// for exact pattern match. Replacing it with more robust implementation +// is desirable. 
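+// Illustrative use (CreateDevice is defined later in this file):
+//   if err := CreateDevice(pool, id); DeviceIDExists(err) {
+//       // the id is in use; retry with a different device id
+//   }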
+func DeviceIDExists(err error) bool { + return fmt.Sprint(err) == fmt.Sprint(ErrDeviceIDExists) +} + +func (t *Task) destroy() { + if t != nil { + DmTaskDestroy(t.unmanaged) + runtime.SetFinalizer(t, nil) + } +} + +// TaskCreateNamed is a convenience function for TaskCreate when a name +// will be set on the task as well +func TaskCreateNamed(t TaskType, name string) (*Task, error) { + task := TaskCreate(t) + if task == nil { + return nil, fmt.Errorf("devicemapper: Can't create task of type %d", int(t)) + } + if err := task.setName(name); err != nil { + return nil, fmt.Errorf("devicemapper: Can't set task name %s", name) + } + return task, nil +} + +// TaskCreate initializes a devicemapper task of tasktype +func TaskCreate(tasktype TaskType) *Task { + Ctask := DmTaskCreate(int(tasktype)) + if Ctask == nil { + return nil + } + task := &Task{unmanaged: Ctask} + runtime.SetFinalizer(task, (*Task).destroy) + return task +} + +func (t *Task) run() error { + if res := DmTaskRun(t.unmanaged); res != 1 { + return ErrTaskRun + } + runtime.KeepAlive(t) + return nil +} + +func (t *Task) setName(name string) error { + if res := DmTaskSetName(t.unmanaged, name); res != 1 { + return ErrTaskSetName + } + return nil +} + +func (t *Task) setMessage(message string) error { + if res := DmTaskSetMessage(t.unmanaged, message); res != 1 { + return ErrTaskSetMessage + } + return nil +} + +func (t *Task) setSector(sector uint64) error { + if res := DmTaskSetSector(t.unmanaged, sector); res != 1 { + return ErrTaskSetSector + } + return nil +} + +func (t *Task) setCookie(cookie *uint, flags uint16) error { + if cookie == nil { + return ErrNilCookie + } + if res := DmTaskSetCookie(t.unmanaged, cookie, flags); res != 1 { + return ErrTaskSetCookie + } + return nil +} + +func (t *Task) setAddNode(addNode AddNodeType) error { + if addNode != addNodeOnResume && addNode != addNodeOnCreate { + return ErrInvalidAddNode + } + if res := DmTaskSetAddNode(t.unmanaged, addNode); res != 1 { + return ErrTaskSetAddNode + } + return nil +} + +func (t *Task) setRo() error { + if res := DmTaskSetRo(t.unmanaged); res != 1 { + return ErrTaskSetRo + } + return nil +} + +func (t *Task) addTarget(start, size uint64, ttype, params string) error { + if res := DmTaskAddTarget(t.unmanaged, start, size, + ttype, params); res != 1 { + return ErrTaskAddTarget + } + return nil +} + +func (t *Task) getDeps() (*Deps, error) { + var deps *Deps + if deps = DmTaskGetDeps(t.unmanaged); deps == nil { + return nil, ErrTaskGetDeps + } + return deps, nil +} + +func (t *Task) getInfo() (*Info, error) { + info := &Info{} + if res := DmTaskGetInfo(t.unmanaged, info); res != 1 { + return nil, ErrTaskGetInfo + } + return info, nil +} + +func (t *Task) getInfoWithDeferred() (*Info, error) { + info := &Info{} + if res := DmTaskGetInfoWithDeferred(t.unmanaged, info); res != 1 { + return nil, ErrTaskGetInfo + } + return info, nil +} + +func (t *Task) getDriverVersion() (string, error) { + res := DmTaskGetDriverVersion(t.unmanaged) + if res == "" { + return "", ErrTaskGetDriverVersion + } + return res, nil +} + +func (t *Task) getNextTarget(next unsafe.Pointer) (nextPtr unsafe.Pointer, start uint64, + length uint64, targetType string, params string) { + + return DmGetNextTarget(t.unmanaged, next, &start, &length, + &targetType, ¶ms), + start, length, targetType, params +} + +// UdevWait waits for any processes that are waiting for udev to complete the specified cookie. 
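+// Every (*Task).setCookie call must be paired with exactly one UdevWait
+// (usually via defer): the System V semaphore backing the cookie is created
+// in setCookie and only released here, so unpaired calls leak semaphores.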
+func UdevWait(cookie *uint) error { + if res := DmUdevWait(*cookie); res != 1 { + logrus.Debugf("devicemapper: Failed to wait on udev cookie %d, %d", *cookie, res) + return ErrUdevWait + } + return nil +} + +// SetDevDir sets the dev folder for the device mapper library (usually /dev). +func SetDevDir(dir string) error { + if res := DmSetDevDir(dir); res != 1 { + logrus.Debug("devicemapper: Error dm_set_dev_dir") + return ErrSetDevDir + } + return nil +} + +// GetLibraryVersion returns the device mapper library version. +func GetLibraryVersion() (string, error) { + var version string + if res := DmGetLibraryVersion(&version); res != 1 { + return "", ErrGetLibraryVersion + } + return version, nil +} + +// UdevSyncSupported returns whether device-mapper is able to sync with udev +// +// This is essential otherwise race conditions can arise where both udev and +// device-mapper attempt to create and destroy devices. +func UdevSyncSupported() bool { + return DmUdevGetSyncSupport() != 0 +} + +// UdevSetSyncSupport allows setting whether the udev sync should be enabled. +// The return bool indicates the state of whether the sync is enabled. +func UdevSetSyncSupport(enable bool) bool { + if enable { + DmUdevSetSyncSupport(1) + } else { + DmUdevSetSyncSupport(0) + } + + return UdevSyncSupported() +} + +// CookieSupported returns whether the version of device-mapper supports the +// use of cookie's in the tasks. +// This is largely a lower level call that other functions use. +func CookieSupported() bool { + return DmCookieSupported() != 0 +} + +// RemoveDevice is a useful helper for cleaning up a device. +func RemoveDevice(name string) error { + task, err := TaskCreateNamed(deviceRemove, name) + if task == nil { + return err + } + + cookie := new(uint) + if err := task.setCookie(cookie, 0); err != nil { + return fmt.Errorf("devicemapper: Can not set cookie: %s", err) + } + defer UdevWait(cookie) + + dmSawBusy = false // reset before the task is run + dmSawEnxio = false + if err = task.run(); err != nil { + if dmSawBusy { + return ErrBusy + } + if dmSawEnxio { + return ErrEnxio + } + return fmt.Errorf("devicemapper: Error running RemoveDevice %s", err) + } + + return nil +} + +// RemoveDeviceDeferred is a useful helper for cleaning up a device, but deferred. +func RemoveDeviceDeferred(name string) error { + logrus.Debugf("devicemapper: RemoveDeviceDeferred START(%s)", name) + defer logrus.Debugf("devicemapper: RemoveDeviceDeferred END(%s)", name) + task, err := TaskCreateNamed(deviceRemove, name) + if task == nil { + return err + } + + if err := DmTaskDeferredRemove(task.unmanaged); err != 1 { + return ErrTaskDeferredRemove + } + + // set a task cookie and disable library fallback, or else libdevmapper will + // disable udev dm rules and delete the symlink under /dev/mapper by itself, + // even if the removal is deferred by the kernel. + cookie := new(uint) + var flags uint16 + flags = DmUdevDisableLibraryFallback + if err := task.setCookie(cookie, flags); err != nil { + return fmt.Errorf("devicemapper: Can not set cookie: %s", err) + } + + // libdevmapper and udev relies on System V semaphore for synchronization, + // semaphores created in `task.setCookie` will be cleaned up in `UdevWait`. + // So these two function call must come in pairs, otherwise semaphores will + // be leaked, and the limit of number of semaphores defined in `/proc/sys/kernel/sem` + // will be reached, which will eventually make all following calls to 'task.SetCookie' + // fail. 
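+	// (On the host, leaked System V semaphores can be inspected with `ipcs -s`.)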
+ // this call will not wait for the deferred removal's final executing, since no + // udev event will be generated, and the semaphore's value will not be incremented + // by udev, what UdevWait is just cleaning up the semaphore. + defer UdevWait(cookie) + + dmSawEnxio = false + if err = task.run(); err != nil { + if dmSawEnxio { + return ErrEnxio + } + return fmt.Errorf("devicemapper: Error running RemoveDeviceDeferred %s", err) + } + + return nil +} + +// CancelDeferredRemove cancels a deferred remove for a device. +func CancelDeferredRemove(deviceName string) error { + task, err := TaskCreateNamed(deviceTargetMsg, deviceName) + if task == nil { + return err + } + + if err := task.setSector(0); err != nil { + return fmt.Errorf("devicemapper: Can't set sector %s", err) + } + + if err := task.setMessage(fmt.Sprintf("@cancel_deferred_remove")); err != nil { + return fmt.Errorf("devicemapper: Can't set message %s", err) + } + + dmSawBusy = false + dmSawEnxio = false + if err := task.run(); err != nil { + // A device might be being deleted already + if dmSawBusy { + return ErrBusy + } else if dmSawEnxio { + return ErrEnxio + } + return fmt.Errorf("devicemapper: Error running CancelDeferredRemove %s", err) + + } + return nil +} + +// GetBlockDeviceSize returns the size of a block device identified by the specified file. +func GetBlockDeviceSize(file *os.File) (uint64, error) { + size, err := ioctlBlkGetSize64(file.Fd()) + if err != nil { + logrus.Errorf("devicemapper: Error getblockdevicesize: %s", err) + return 0, ErrGetBlockSize + } + return uint64(size), nil +} + +// BlockDeviceDiscard runs discard for the given path. +// This is used as a workaround for the kernel not discarding block so +// on the thin pool when we remove a thinp device, so we do it +// manually +func BlockDeviceDiscard(path string) error { + file, err := os.OpenFile(path, os.O_RDWR, 0) + if err != nil { + return err + } + defer file.Close() + + size, err := GetBlockDeviceSize(file) + if err != nil { + return err + } + + if err := ioctlBlkDiscard(file.Fd(), 0, size); err != nil { + return err + } + + // Without this sometimes the remove of the device that happens after + // discard fails with EBUSY. + unix.Sync() + + return nil +} + +// CreatePool is the programmatic example of "dmsetup create". +// It creates a device with the specified poolName, data and metadata file and block size. +func CreatePool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error { + task, err := TaskCreateNamed(deviceCreate, poolName) + if task == nil { + return err + } + + size, err := GetBlockDeviceSize(dataFile) + if err != nil { + return fmt.Errorf("devicemapper: Can't get data size %s", err) + } + + params := fmt.Sprintf("%s %s %d 32768 1 skip_block_zeroing", metadataFile.Name(), dataFile.Name(), poolBlockSize) + if err := task.addTarget(0, size/512, "thin-pool", params); err != nil { + return fmt.Errorf("devicemapper: Can't add target %s", err) + } + + cookie := new(uint) + var flags uint16 + flags = DmUdevDisableSubsystemRulesFlag | DmUdevDisableDiskRulesFlag | DmUdevDisableOtherRulesFlag + if err := task.setCookie(cookie, flags); err != nil { + return fmt.Errorf("devicemapper: Can't set cookie %s", err) + } + defer UdevWait(cookie) + + if err := task.run(); err != nil { + return fmt.Errorf("devicemapper: Error running deviceCreate (CreatePool) %s", err) + } + + return nil +} + +// ReloadPool is the programmatic example of "dmsetup reload". 
+// It reloads the table with the specified poolName, data and metadata file and block size. +func ReloadPool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error { + task, err := TaskCreateNamed(deviceReload, poolName) + if task == nil { + return err + } + + size, err := GetBlockDeviceSize(dataFile) + if err != nil { + return fmt.Errorf("devicemapper: Can't get data size %s", err) + } + + params := fmt.Sprintf("%s %s %d 32768 1 skip_block_zeroing", metadataFile.Name(), dataFile.Name(), poolBlockSize) + if err := task.addTarget(0, size/512, "thin-pool", params); err != nil { + return fmt.Errorf("devicemapper: Can't add target %s", err) + } + + if err := task.run(); err != nil { + return fmt.Errorf("devicemapper: Error running ReloadPool %s", err) + } + + return nil +} + +// GetDeps is the programmatic example of "dmsetup deps". +// It outputs a list of devices referenced by the live table for the specified device. +func GetDeps(name string) (*Deps, error) { + task, err := TaskCreateNamed(deviceDeps, name) + if task == nil { + return nil, err + } + if err := task.run(); err != nil { + return nil, err + } + return task.getDeps() +} + +// GetInfo is the programmatic example of "dmsetup info". +// It outputs some brief information about the device. +func GetInfo(name string) (*Info, error) { + task, err := TaskCreateNamed(deviceInfo, name) + if task == nil { + return nil, err + } + if err := task.run(); err != nil { + return nil, err + } + return task.getInfo() +} + +// GetInfoWithDeferred is the programmatic example of "dmsetup info", but deferred. +// It outputs some brief information about the device. +func GetInfoWithDeferred(name string) (*Info, error) { + task, err := TaskCreateNamed(deviceInfo, name) + if task == nil { + return nil, err + } + if err := task.run(); err != nil { + return nil, err + } + return task.getInfoWithDeferred() +} + +// GetDriverVersion is the programmatic example of "dmsetup version". +// It outputs version information of the driver. +func GetDriverVersion() (string, error) { + task := TaskCreate(deviceVersion) + if task == nil { + return "", fmt.Errorf("devicemapper: Can't create deviceVersion task") + } + if err := task.run(); err != nil { + return "", err + } + return task.getDriverVersion() +} + +// GetStatus is the programmatic example of "dmsetup status". +// It outputs status information for the specified device name. +func GetStatus(name string) (uint64, uint64, string, string, error) { + task, err := TaskCreateNamed(deviceStatus, name) + if task == nil { + logrus.Debugf("devicemapper: GetStatus() Error TaskCreateNamed: %s", err) + return 0, 0, "", "", err + } + if err := task.run(); err != nil { + logrus.Debugf("devicemapper: GetStatus() Error Run: %s", err) + return 0, 0, "", "", err + } + + devinfo, err := task.getInfo() + if err != nil { + logrus.Debugf("devicemapper: GetStatus() Error GetInfo: %s", err) + return 0, 0, "", "", err + } + if devinfo.Exists == 0 { + logrus.Debugf("devicemapper: GetStatus() Non existing device %s", name) + return 0, 0, "", "", fmt.Errorf("devicemapper: Non existing device %s", name) + } + + _, start, length, targetType, params := task.getNextTarget(unsafe.Pointer(nil)) + return start, length, targetType, params, nil +} + +// GetTable is the programmatic example for "dmsetup table". +// It outputs the current table for the specified device name. 
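+// For a thin device activated by this package, the returned params string has
+// the shape "<poolName> <deviceID>" (see activateDevice below), mirroring the
+// output of `dmsetup table <name>`.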
+func GetTable(name string) (uint64, uint64, string, string, error) { + task, err := TaskCreateNamed(deviceTable, name) + if task == nil { + logrus.Debugf("devicemapper: GetTable() Error TaskCreateNamed: %s", err) + return 0, 0, "", "", err + } + if err := task.run(); err != nil { + logrus.Debugf("devicemapper: GetTable() Error Run: %s", err) + return 0, 0, "", "", err + } + + devinfo, err := task.getInfo() + if err != nil { + logrus.Debugf("devicemapper: GetTable() Error GetInfo: %s", err) + return 0, 0, "", "", err + } + if devinfo.Exists == 0 { + logrus.Debugf("devicemapper: GetTable() Non existing device %s", name) + return 0, 0, "", "", fmt.Errorf("devicemapper: Non existing device %s", name) + } + + _, start, length, targetType, params := task.getNextTarget(unsafe.Pointer(nil)) + return start, length, targetType, params, nil +} + +// SetTransactionID sets a transaction id for the specified device name. +func SetTransactionID(poolName string, oldID uint64, newID uint64) error { + task, err := TaskCreateNamed(deviceTargetMsg, poolName) + if task == nil { + return err + } + + if err := task.setSector(0); err != nil { + return fmt.Errorf("devicemapper: Can't set sector %s", err) + } + + if err := task.setMessage(fmt.Sprintf("set_transaction_id %d %d", oldID, newID)); err != nil { + return fmt.Errorf("devicemapper: Can't set message %s", err) + } + + if err := task.run(); err != nil { + return fmt.Errorf("devicemapper: Error running SetTransactionID %s", err) + } + return nil +} + +// SuspendDevice is the programmatic example of "dmsetup suspend". +// It suspends the specified device. +func SuspendDevice(name string) error { + task, err := TaskCreateNamed(deviceSuspend, name) + if task == nil { + return err + } + if err := task.run(); err != nil { + return fmt.Errorf("devicemapper: Error running deviceSuspend %s", err) + } + return nil +} + +// ResumeDevice is the programmatic example of "dmsetup resume". +// It un-suspends the specified device. +func ResumeDevice(name string) error { + task, err := TaskCreateNamed(deviceResume, name) + if task == nil { + return err + } + + cookie := new(uint) + if err := task.setCookie(cookie, 0); err != nil { + return fmt.Errorf("devicemapper: Can't set cookie %s", err) + } + defer UdevWait(cookie) + + if err := task.run(); err != nil { + return fmt.Errorf("devicemapper: Error running deviceResume %s", err) + } + + return nil +} + +// CreateDevice creates a device with the specified poolName with the specified device id. +func CreateDevice(poolName string, deviceID int) error { + logrus.Debugf("devicemapper: CreateDevice(poolName=%v, deviceID=%v)", poolName, deviceID) + task, err := TaskCreateNamed(deviceTargetMsg, poolName) + if task == nil { + return err + } + + if err := task.setSector(0); err != nil { + return fmt.Errorf("devicemapper: Can't set sector %s", err) + } + + if err := task.setMessage(fmt.Sprintf("create_thin %d", deviceID)); err != nil { + return fmt.Errorf("devicemapper: Can't set message %s", err) + } + + dmSawExist = false // reset before the task is run + if err := task.run(); err != nil { + // Caller wants to know about ErrDeviceIDExists so that it can try with a different device id. + if dmSawExist { + return ErrDeviceIDExists + } + + return fmt.Errorf("devicemapper: Error running CreateDevice %s", err) + + } + return nil +} + +// DeleteDevice deletes a device with the specified poolName with the specified device id. 
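+// It is the programmatic equivalent of `dmsetup message <poolName> 0 "delete <deviceID>"`.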
+func DeleteDevice(poolName string, deviceID int) error { + task, err := TaskCreateNamed(deviceTargetMsg, poolName) + if task == nil { + return err + } + + if err := task.setSector(0); err != nil { + return fmt.Errorf("devicemapper: Can't set sector %s", err) + } + + if err := task.setMessage(fmt.Sprintf("delete %d", deviceID)); err != nil { + return fmt.Errorf("devicemapper: Can't set message %s", err) + } + + dmSawBusy = false + if err := task.run(); err != nil { + if dmSawBusy { + return ErrBusy + } + return fmt.Errorf("devicemapper: Error running DeleteDevice %s", err) + } + return nil +} + +// ActivateDevice activates the device identified by the specified +// poolName, name and deviceID with the specified size. +func ActivateDevice(poolName string, name string, deviceID int, size uint64) error { + return activateDevice(poolName, name, deviceID, size, "") +} + +// ActivateDeviceWithExternal activates the device identified by the specified +// poolName, name and deviceID with the specified size. +func ActivateDeviceWithExternal(poolName string, name string, deviceID int, size uint64, external string) error { + return activateDevice(poolName, name, deviceID, size, external) +} + +func activateDevice(poolName string, name string, deviceID int, size uint64, external string) error { + task, err := TaskCreateNamed(deviceCreate, name) + if task == nil { + return err + } + + var params string + if len(external) > 0 { + params = fmt.Sprintf("%s %d %s", poolName, deviceID, external) + } else { + params = fmt.Sprintf("%s %d", poolName, deviceID) + } + if err := task.addTarget(0, size/512, "thin", params); err != nil { + return fmt.Errorf("devicemapper: Can't add target %s", err) + } + if err := task.setAddNode(addNodeOnCreate); err != nil { + return fmt.Errorf("devicemapper: Can't add node %s", err) + } + + cookie := new(uint) + if err := task.setCookie(cookie, 0); err != nil { + return fmt.Errorf("devicemapper: Can't set cookie %s", err) + } + + defer UdevWait(cookie) + + if err := task.run(); err != nil { + return fmt.Errorf("devicemapper: Error running deviceCreate (ActivateDevice) %s", err) + } + + return nil +} + +// CreateSnapDeviceRaw creates a snapshot device. Caller needs to suspend and resume the origin device if it is active. +func CreateSnapDeviceRaw(poolName string, deviceID int, baseDeviceID int) error { + task, err := TaskCreateNamed(deviceTargetMsg, poolName) + if task == nil { + return err + } + + if err := task.setSector(0); err != nil { + return fmt.Errorf("devicemapper: Can't set sector %s", err) + } + + if err := task.setMessage(fmt.Sprintf("create_snap %d %d", deviceID, baseDeviceID)); err != nil { + return fmt.Errorf("devicemapper: Can't set message %s", err) + } + + dmSawExist = false // reset before the task is run + if err := task.run(); err != nil { + // Caller wants to know about ErrDeviceIDExists so that it can try with a different device id. 
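+		// (dmSawExist is set by the log callback in devmapper_log.go when
+		// libdm reports "File exists".)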
+		if dmSawExist {
+			return ErrDeviceIDExists
+		}
+		return fmt.Errorf("devicemapper: Error running deviceCreate (CreateSnapDeviceRaw) %s", err)
+	}
+
+	return nil
+}
+
+// CreateSnapDevice creates a snapshot based on the device identified by the baseName and baseDeviceID.
+func CreateSnapDevice(poolName string, deviceID int, baseName string, baseDeviceID int) error {
+	devinfo, _ := GetInfo(baseName)
+	doSuspend := devinfo != nil && devinfo.Exists != 0
+
+	if doSuspend {
+		if err := SuspendDevice(baseName); err != nil {
+			return err
+		}
+	}
+
+	if err := CreateSnapDeviceRaw(poolName, deviceID, baseDeviceID); err != nil {
+		if doSuspend {
+			if err2 := ResumeDevice(baseName); err2 != nil {
+				return fmt.Errorf("CreateSnapDeviceRaw Error: (%v): ResumeDevice Error: (%v)", err, err2)
+			}
+		}
+		return err
+	}
+
+	if doSuspend {
+		if err := ResumeDevice(baseName); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_log.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_log.go
new file mode 100644
index 0000000..b540281
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_log.go
@@ -0,0 +1,121 @@
+// +build linux,cgo
+
+package devicemapper
+
+import "C"
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/sirupsen/logrus"
+)
+
+// DevmapperLogger defines methods required to register as a callback for
+// logging events received from devicemapper. Note that devicemapper will send
+// *all* logs to callbacks regardless of level (including debug logs), so it's
+// recommended to not spam the console with the outputs.
+type DevmapperLogger interface {
+	// DMLog is the logging callback containing all of the information from
+	// devicemapper. The interface is identical to the C libdm counterpart.
+	DMLog(level int, file string, line int, dmError int, message string)
+}
+
+// dmLogger is the current logger in use that is being forwarded our messages.
+var dmLogger DevmapperLogger
+
+// LogInit changes the logging callback called after processing libdm logs for
+// error message information. The default logger simply forwards all logs to
+// logrus. Calling LogInit(nil) disables the calling of callbacks.
+func LogInit(logger DevmapperLogger) {
+	dmLogger = logger
+}
+
+// Due to the way cgo works this has to be in a separate file, as devmapper.go has
+// definitions in the cgo block, which is incompatible with using "//export"
+
+// StorageDevmapperLogCallback exports the devmapper log callback for cgo. Note that
+// because we are using callbacks, this function will be called for *every* log
+// in libdm (even debug ones because there's no way of setting the verbosity
+// level for an external logging callback).
+//export StorageDevmapperLogCallback
+func StorageDevmapperLogCallback(level C.int, file *C.char, line, dmErrnoOrClass C.int, message *C.char) {
+	msg := C.GoString(message)
+
+	// Track what errno libdm saw, because the library only gives us 0 or 1.
+	if level < LogLevelDebug {
+		if strings.Contains(msg, "busy") {
+			dmSawBusy = true
+		}
+
+		if strings.Contains(msg, "File exists") {
+			dmSawExist = true
+		}
+
+		if strings.Contains(msg, "No such device or address") {
+			dmSawEnxio = true
+		}
+	}
+
+	if dmLogger != nil {
+		dmLogger.DMLog(int(level), C.GoString(file), int(line), int(dmErrnoOrClass), msg)
+	}
+}
+
+// DefaultLogger is the default logger used by pkg/devicemapper.
It forwards +// all logs that are of higher or equal priority to the given level to the +// corresponding logrus level. +type DefaultLogger struct { + // Level corresponds to the highest libdm level that will be forwarded to + // logrus. In order to change this, register a new DefaultLogger. + Level int +} + +// DMLog is the logging callback containing all of the information from +// devicemapper. The interface is identical to the C libdm counterpart. +func (l DefaultLogger) DMLog(level int, file string, line, dmError int, message string) { + if level <= l.Level { + // Forward the log to the correct logrus level, if allowed by dmLogLevel. + logMsg := fmt.Sprintf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message) + switch level { + case LogLevelFatal, LogLevelErr: + logrus.Error(logMsg) + case LogLevelWarn: + logrus.Warn(logMsg) + case LogLevelNotice, LogLevelInfo: + logrus.Info(logMsg) + case LogLevelDebug: + logrus.Debug(logMsg) + default: + // Don't drop any "unknown" levels. + logrus.Info(logMsg) + } + } +} + +// registerLogCallback registers our own logging callback function for libdm +// (which is StorageDevmapperLogCallback). +// +// Because libdm only gives us {0,1} error codes we need to parse the logs +// produced by libdm (to set dmSawBusy and so on). Note that by registering a +// callback using StorageDevmapperLogCallback, libdm will no longer output logs to +// stderr so we have to log everything ourselves. None of this handling is +// optional because we depend on log callbacks to parse the logs, and if we +// don't forward the log information we'll be in a lot of trouble when +// debugging things. +func registerLogCallback() { + LogWithErrnoInit() +} + +func init() { + // Use the default logger by default. We only allow LogLevelFatal by + // default, because internally we mask a lot of libdm errors by retrying + // and similar tricks. Also, libdm is very chatty and we don't want to + // worry users for no reason. + dmLogger = DefaultLogger{ + Level: LogLevelFatal, + } + + // Register as early as possible so we don't miss anything. + registerLogCallback() +} diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper.go new file mode 100644 index 0000000..190d83d --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper.go @@ -0,0 +1,252 @@ +// +build linux,cgo + +package devicemapper + +/* +#define _GNU_SOURCE +#include +#include // FIXME: present only for BLKGETSIZE64, maybe we can remove it? + +// FIXME: Can't we find a way to do the logging in pure Go? +extern void StorageDevmapperLogCallback(int level, char *file, int line, int dm_errno_or_class, char *str); + +static void log_cb(int level, const char *file, int line, int dm_errno_or_class, const char *f, ...) +{ + char *buffer = NULL; + va_list ap; + int ret; + + va_start(ap, f); + ret = vasprintf(&buffer, f, ap); + va_end(ap); + if (ret < 0) { + // memory allocation failed -- should never happen? + return; + } + + StorageDevmapperLogCallback(level, (char *)file, line, dm_errno_or_class, buffer); + free(buffer); +} + +static void log_with_errno_init() +{ + dm_log_with_errno_init(log_cb); +} +*/ +import "C" + +import ( + "reflect" + "unsafe" +) + +type ( + cdmTask C.struct_dm_task +) + +// IOCTL consts +const ( + BlkGetSize64 = C.BLKGETSIZE64 + BlkDiscard = C.BLKDISCARD +) + +// Devicemapper cookie flags. 
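+// These flags are OR'd together and passed as the flags argument of
+// (*Task).setCookie to keep udev from applying its default rules while a
+// task runs; CreatePool and RemoveDeviceDeferred in devmapper.go show typical
+// combinations.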
+const ( + DmUdevDisableSubsystemRulesFlag = C.DM_UDEV_DISABLE_SUBSYSTEM_RULES_FLAG + DmUdevDisableDiskRulesFlag = C.DM_UDEV_DISABLE_DISK_RULES_FLAG + DmUdevDisableOtherRulesFlag = C.DM_UDEV_DISABLE_OTHER_RULES_FLAG + DmUdevDisableLibraryFallback = C.DM_UDEV_DISABLE_LIBRARY_FALLBACK +) + +// DeviceMapper mapped functions. +var ( + DmGetLibraryVersion = dmGetLibraryVersionFct + DmGetNextTarget = dmGetNextTargetFct + DmSetDevDir = dmSetDevDirFct + DmTaskAddTarget = dmTaskAddTargetFct + DmTaskCreate = dmTaskCreateFct + DmTaskDestroy = dmTaskDestroyFct + DmTaskGetDeps = dmTaskGetDepsFct + DmTaskGetInfo = dmTaskGetInfoFct + DmTaskGetDriverVersion = dmTaskGetDriverVersionFct + DmTaskRun = dmTaskRunFct + DmTaskSetAddNode = dmTaskSetAddNodeFct + DmTaskSetCookie = dmTaskSetCookieFct + DmTaskSetMessage = dmTaskSetMessageFct + DmTaskSetName = dmTaskSetNameFct + DmTaskSetRo = dmTaskSetRoFct + DmTaskSetSector = dmTaskSetSectorFct + DmUdevWait = dmUdevWaitFct + DmUdevSetSyncSupport = dmUdevSetSyncSupportFct + DmUdevGetSyncSupport = dmUdevGetSyncSupportFct + DmCookieSupported = dmCookieSupportedFct + LogWithErrnoInit = logWithErrnoInitFct + DmTaskDeferredRemove = dmTaskDeferredRemoveFct + DmTaskGetInfoWithDeferred = dmTaskGetInfoWithDeferredFct +) + +func free(p *C.char) { + C.free(unsafe.Pointer(p)) +} + +func dmTaskDestroyFct(task *cdmTask) { + C.dm_task_destroy((*C.struct_dm_task)(task)) +} + +func dmTaskCreateFct(taskType int) *cdmTask { + return (*cdmTask)(C.dm_task_create(C.int(taskType))) +} + +func dmTaskRunFct(task *cdmTask) int { + ret, _ := C.dm_task_run((*C.struct_dm_task)(task)) + return int(ret) +} + +func dmTaskSetNameFct(task *cdmTask, name string) int { + Cname := C.CString(name) + defer free(Cname) + + return int(C.dm_task_set_name((*C.struct_dm_task)(task), Cname)) +} + +func dmTaskSetMessageFct(task *cdmTask, message string) int { + Cmessage := C.CString(message) + defer free(Cmessage) + + return int(C.dm_task_set_message((*C.struct_dm_task)(task), Cmessage)) +} + +func dmTaskSetSectorFct(task *cdmTask, sector uint64) int { + return int(C.dm_task_set_sector((*C.struct_dm_task)(task), C.uint64_t(sector))) +} + +func dmTaskSetCookieFct(task *cdmTask, cookie *uint, flags uint16) int { + cCookie := C.uint32_t(*cookie) + defer func() { + *cookie = uint(cCookie) + }() + return int(C.dm_task_set_cookie((*C.struct_dm_task)(task), &cCookie, C.uint16_t(flags))) +} + +func dmTaskSetAddNodeFct(task *cdmTask, addNode AddNodeType) int { + return int(C.dm_task_set_add_node((*C.struct_dm_task)(task), C.dm_add_node_t(addNode))) +} + +func dmTaskSetRoFct(task *cdmTask) int { + return int(C.dm_task_set_ro((*C.struct_dm_task)(task))) +} + +func dmTaskAddTargetFct(task *cdmTask, + start, size uint64, ttype, params string) int { + + Cttype := C.CString(ttype) + defer free(Cttype) + + Cparams := C.CString(params) + defer free(Cparams) + + return int(C.dm_task_add_target((*C.struct_dm_task)(task), C.uint64_t(start), C.uint64_t(size), Cttype, Cparams)) +} + +func dmTaskGetDepsFct(task *cdmTask) *Deps { + Cdeps := C.dm_task_get_deps((*C.struct_dm_task)(task)) + if Cdeps == nil { + return nil + } + + // golang issue: https://github.com/golang/go/issues/11925 + hdr := reflect.SliceHeader{ + Data: uintptr(unsafe.Pointer(uintptr(unsafe.Pointer(Cdeps)) + unsafe.Sizeof(*Cdeps))), + Len: int(Cdeps.count), + Cap: int(Cdeps.count), + } + devices := *(*[]C.uint64_t)(unsafe.Pointer(&hdr)) + + deps := &Deps{ + Count: uint32(Cdeps.count), + Filler: uint32(Cdeps.filler), + } + for _, device := range devices { + deps.Device 
= append(deps.Device, uint64(device)) + } + return deps +} + +func dmTaskGetInfoFct(task *cdmTask, info *Info) int { + Cinfo := C.struct_dm_info{} + defer func() { + info.Exists = int(Cinfo.exists) + info.Suspended = int(Cinfo.suspended) + info.LiveTable = int(Cinfo.live_table) + info.InactiveTable = int(Cinfo.inactive_table) + info.OpenCount = int32(Cinfo.open_count) + info.EventNr = uint32(Cinfo.event_nr) + info.Major = uint32(Cinfo.major) + info.Minor = uint32(Cinfo.minor) + info.ReadOnly = int(Cinfo.read_only) + info.TargetCount = int32(Cinfo.target_count) + }() + return int(C.dm_task_get_info((*C.struct_dm_task)(task), &Cinfo)) +} + +func dmTaskGetDriverVersionFct(task *cdmTask) string { + buffer := C.malloc(128) + defer C.free(buffer) + res := C.dm_task_get_driver_version((*C.struct_dm_task)(task), (*C.char)(buffer), 128) + if res == 0 { + return "" + } + return C.GoString((*C.char)(buffer)) +} + +func dmGetNextTargetFct(task *cdmTask, next unsafe.Pointer, start, length *uint64, target, params *string) unsafe.Pointer { + var ( + Cstart, Clength C.uint64_t + CtargetType, Cparams *C.char + ) + defer func() { + *start = uint64(Cstart) + *length = uint64(Clength) + *target = C.GoString(CtargetType) + *params = C.GoString(Cparams) + }() + + nextp := C.dm_get_next_target((*C.struct_dm_task)(task), next, &Cstart, &Clength, &CtargetType, &Cparams) + return nextp +} + +func dmUdevSetSyncSupportFct(syncWithUdev int) { + (C.dm_udev_set_sync_support(C.int(syncWithUdev))) +} + +func dmUdevGetSyncSupportFct() int { + return int(C.dm_udev_get_sync_support()) +} + +func dmUdevWaitFct(cookie uint) int { + return int(C.dm_udev_wait(C.uint32_t(cookie))) +} + +func dmCookieSupportedFct() int { + return int(C.dm_cookie_supported()) +} + +func logWithErrnoInitFct() { + C.log_with_errno_init() +} + +func dmSetDevDirFct(dir string) int { + Cdir := C.CString(dir) + defer free(Cdir) + + return int(C.dm_set_dev_dir(Cdir)) +} + +func dmGetLibraryVersionFct(version *string) int { + buffer := C.CString(string(make([]byte, 128))) + defer free(buffer) + defer func() { + *version = C.GoString(buffer) + }() + return int(C.dm_get_library_version(buffer, 128)) +} diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_deferred_remove.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_deferred_remove.go new file mode 100644 index 0000000..7f793c2 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_deferred_remove.go @@ -0,0 +1,31 @@ +// +build linux,cgo,!libdm_no_deferred_remove + +package devicemapper + +// #include +import "C" + +// LibraryDeferredRemovalSupport tells if the feature is enabled in the build +const LibraryDeferredRemovalSupport = true + +func dmTaskDeferredRemoveFct(task *cdmTask) int { + return int(C.dm_task_deferred_remove((*C.struct_dm_task)(task))) +} + +func dmTaskGetInfoWithDeferredFct(task *cdmTask, info *Info) int { + Cinfo := C.struct_dm_info{} + defer func() { + info.Exists = int(Cinfo.exists) + info.Suspended = int(Cinfo.suspended) + info.LiveTable = int(Cinfo.live_table) + info.InactiveTable = int(Cinfo.inactive_table) + info.OpenCount = int32(Cinfo.open_count) + info.EventNr = uint32(Cinfo.event_nr) + info.Major = uint32(Cinfo.major) + info.Minor = uint32(Cinfo.minor) + info.ReadOnly = int(Cinfo.read_only) + info.TargetCount = int32(Cinfo.target_count) + info.DeferredRemove = int(Cinfo.deferred_remove) + }() + return int(C.dm_task_get_info((*C.struct_dm_task)(task), &Cinfo)) +} diff --git 
a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_dynamic.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_dynamic.go new file mode 100644 index 0000000..7d84508 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_dynamic.go @@ -0,0 +1,6 @@ +// +build linux,cgo,!static_build + +package devicemapper + +// #cgo pkg-config: devmapper +import "C" diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go new file mode 100644 index 0000000..a880fec --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go @@ -0,0 +1,15 @@ +// +build linux,cgo,libdm_no_deferred_remove + +package devicemapper + +// LibraryDeferredRemovalSupport tells if the feature is enabled in the build +const LibraryDeferredRemovalSupport = false + +func dmTaskDeferredRemoveFct(task *cdmTask) int { + // Error. Nobody should be calling it. + return -1 +} + +func dmTaskGetInfoWithDeferredFct(task *cdmTask, info *Info) int { + return -1 +} diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_static.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_static.go new file mode 100644 index 0000000..cf7f26a --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_static.go @@ -0,0 +1,6 @@ +// +build linux,cgo,static_build + +package devicemapper + +// #cgo pkg-config: --static devmapper +import "C" diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/ioctl.go b/vendor/github.com/containers/storage/pkg/devicemapper/ioctl.go new file mode 100644 index 0000000..50ea7c4 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/devicemapper/ioctl.go @@ -0,0 +1,28 @@ +// +build linux,cgo + +package devicemapper + +import ( + "unsafe" + + "golang.org/x/sys/unix" +) + +func ioctlBlkGetSize64(fd uintptr) (int64, error) { + var size int64 + if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, BlkGetSize64, uintptr(unsafe.Pointer(&size))); err != 0 { + return 0, err + } + return size, nil +} + +func ioctlBlkDiscard(fd uintptr, offset, length uint64) error { + var r [2]uint64 + r[0] = offset + r[1] = length + + if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, BlkDiscard, uintptr(unsafe.Pointer(&r[0]))); err != 0 { + return err + } + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/log.go b/vendor/github.com/containers/storage/pkg/devicemapper/log.go new file mode 100644 index 0000000..cee5e54 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/devicemapper/log.go @@ -0,0 +1,11 @@ +package devicemapper + +// definitions from lvm2 lib/log/log.h +const ( + LogLevelFatal = 2 + iota // _LOG_FATAL + LogLevelErr // _LOG_ERR + LogLevelWarn // _LOG_WARN + LogLevelNotice // _LOG_NOTICE + LogLevelInfo // _LOG_INFO + LogLevelDebug // _LOG_DEBUG +) diff --git a/vendor/github.com/containers/storage/pkg/directory/directory.go b/vendor/github.com/containers/storage/pkg/directory/directory.go new file mode 100644 index 0000000..1715ef4 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/directory/directory.go @@ -0,0 +1,26 @@ +package directory + +import ( + "io/ioutil" + "os" + "path/filepath" +) + +// MoveToSubdir moves all contents of a directory to a subdirectory underneath the original path +func MoveToSubdir(oldpath, 
subdir string) error { + + infos, err := ioutil.ReadDir(oldpath) + if err != nil { + return err + } + for _, info := range infos { + if info.Name() != subdir { + oldName := filepath.Join(oldpath, info.Name()) + newName := filepath.Join(oldpath, subdir, info.Name()) + if err := os.Rename(oldName, newName); err != nil { + return err + } + } + } + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/directory/directory_unix.go b/vendor/github.com/containers/storage/pkg/directory/directory_unix.go new file mode 100644 index 0000000..05522d6 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/directory/directory_unix.go @@ -0,0 +1,48 @@ +// +build linux darwin freebsd solaris + +package directory + +import ( + "os" + "path/filepath" + "syscall" +) + +// Size walks a directory tree and returns its total size in bytes. +func Size(dir string) (size int64, err error) { + data := make(map[uint64]struct{}) + err = filepath.Walk(dir, func(d string, fileInfo os.FileInfo, err error) error { + if err != nil { + // if dir does not exist, Size() returns the error. + // if dir/x disappeared while walking, Size() ignores dir/x. + if os.IsNotExist(err) && d != dir { + return nil + } + return err + } + + // Ignore directory sizes + if fileInfo == nil { + return nil + } + + s := fileInfo.Size() + if fileInfo.IsDir() || s == 0 { + return nil + } + + // Check inode to handle hard links correctly + inode := fileInfo.Sys().(*syscall.Stat_t).Ino + // inode is not a uint64 on all platforms. Cast it to avoid issues. + if _, exists := data[uint64(inode)]; exists { + return nil + } + // inode is not a uint64 on all platforms. Cast it to avoid issues. + data[uint64(inode)] = struct{}{} + + size += s + + return nil + }) + return +} diff --git a/vendor/github.com/containers/storage/pkg/directory/directory_windows.go b/vendor/github.com/containers/storage/pkg/directory/directory_windows.go new file mode 100644 index 0000000..6fb0917 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/directory/directory_windows.go @@ -0,0 +1,37 @@ +// +build windows + +package directory + +import ( + "os" + "path/filepath" +) + +// Size walks a directory tree and returns its total size in bytes. +func Size(dir string) (size int64, err error) { + err = filepath.Walk(dir, func(d string, fileInfo os.FileInfo, err error) error { + if err != nil { + // if dir does not exist, Size() returns the error. + // if dir/x disappeared while walking, Size() ignores dir/x. 
+ if os.IsNotExist(err) && d != dir { + return nil + } + return err + } + + // Ignore directory sizes + if fileInfo == nil { + return nil + } + + s := fileInfo.Size() + if fileInfo.IsDir() || s == 0 { + return nil + } + + size += s + + return nil + }) + return +} diff --git a/vendor/github.com/containers/storage/pkg/dmesg/dmesg_linux.go b/vendor/github.com/containers/storage/pkg/dmesg/dmesg_linux.go new file mode 100644 index 0000000..7df7f3d --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/dmesg/dmesg_linux.go @@ -0,0 +1,20 @@ +// +build linux + +package dmesg + +import ( + "unsafe" + + "golang.org/x/sys/unix" +) + +// Dmesg returns the last messages from the kernel log, up to size bytes +func Dmesg(size int) []byte { + t := uintptr(3) // SYSLOG_ACTION_READ_ALL + b := make([]byte, size) + amt, _, err := unix.Syscall(unix.SYS_SYSLOG, t, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b))) + if err != 0 { + return []byte{} + } + return b[:amt] +} diff --git a/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go b/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go new file mode 100644 index 0000000..a129e65 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go @@ -0,0 +1,298 @@ +package fileutils + +import ( + "errors" + "fmt" + "io" + "os" + "path/filepath" + "regexp" + "strings" + "text/scanner" + + "github.com/sirupsen/logrus" +) + +// PatternMatcher allows checking paths against a list of patterns +type PatternMatcher struct { + patterns []*Pattern + exclusions bool +} + +// NewPatternMatcher creates a new matcher object for specific patterns that can +// be used later to match paths against those patterns +func NewPatternMatcher(patterns []string) (*PatternMatcher, error) { + pm := &PatternMatcher{ + patterns: make([]*Pattern, 0, len(patterns)), + } + for _, p := range patterns { + // Eliminate leading and trailing whitespace. + p = strings.TrimSpace(p) + if p == "" { + continue + } + p = filepath.Clean(p) + newp := &Pattern{} + if p[0] == '!' { + if len(p) == 1 { + return nil, errors.New("illegal exclusion pattern: \"!\"") + } + newp.exclusion = true + p = p[1:] + pm.exclusions = true + } + // Do some syntax checking on the pattern. + // filepath's Match() has some really weird rules that are inconsistent + // so instead of trying to dup their logic, just call Match() for its + // error state and if there is an error in the pattern return it. + // If this becomes an issue we can remove this since it's really only + // needed in the error (syntax) case - which isn't really critical. + if _, err := filepath.Match(p, "."); err != nil { + return nil, err + } + newp.cleanedPattern = p + newp.dirs = strings.Split(p, string(os.PathSeparator)) + pm.patterns = append(pm.patterns, newp) + } + return pm, nil +} + +// Matches matches path against all the patterns. Matches is not safe to be +// called concurrently. +func (pm *PatternMatcher) Matches(file string) (bool, error) { + matched := false + file = filepath.FromSlash(file) + parentPath := filepath.Dir(file) + parentPathDirs := strings.Split(parentPath, string(os.PathSeparator)) + + for _, pattern := range pm.patterns { + negative := false + + if pattern.exclusion { + negative = true + } + + match, err := pattern.match(file) + if err != nil { + return false, err + } + + if !match && parentPath != "." { + // Check to see if the pattern matches one of our parent dirs.
+ if len(pattern.dirs) <= len(parentPathDirs) { + match, _ = pattern.match(strings.Join(parentPathDirs[:len(pattern.dirs)], string(os.PathSeparator))) + } + } + + if match { + matched = !negative + } + } + + if matched { + logrus.Debugf("Skipping excluded path: %s", file) + } + + return matched, nil +} + +// Exclusions returns true if any of the patterns define exclusions +func (pm *PatternMatcher) Exclusions() bool { + return pm.exclusions +} + +// Patterns returns the array of active patterns +func (pm *PatternMatcher) Patterns() []*Pattern { + return pm.patterns +} + +// Pattern defines a single regexp used to filter file paths. +type Pattern struct { + cleanedPattern string + dirs []string + regexp *regexp.Regexp + exclusion bool +} + +func (p *Pattern) String() string { + return p.cleanedPattern +} + +// Exclusion returns true if this pattern defines exclusion +func (p *Pattern) Exclusion() bool { + return p.exclusion +} + +func (p *Pattern) match(path string) (bool, error) { + + if p.regexp == nil { + if err := p.compile(); err != nil { + return false, filepath.ErrBadPattern + } + } + + b := p.regexp.MatchString(path) + + return b, nil +} + +func (p *Pattern) compile() error { + regStr := "^" + pattern := p.cleanedPattern + // Go through the pattern and convert it to a regexp. + // We use a scanner so we can support utf-8 chars. + var scan scanner.Scanner + scan.Init(strings.NewReader(pattern)) + + sl := string(os.PathSeparator) + escSL := sl + if sl == `\` { + escSL += `\` + } + + for scan.Peek() != scanner.EOF { + ch := scan.Next() + + if ch == '*' { + if scan.Peek() == '*' { + // is some flavor of "**" + scan.Next() + + // Treat **/ as ** so eat the "/" + if string(scan.Peek()) == sl { + scan.Next() + } + + if scan.Peek() == scanner.EOF { + // is "**EOF" - to align with .gitignore just accept all + regStr += ".*" + } else { + // is "**" + // Note that this allows for any # of /'s (even 0) because + // the .* will eat everything, even /'s + regStr += "(.*" + escSL + ")?" + } + } else { + // is "*" so map it to anything but "/" + regStr += "[^" + escSL + "]*" + } + } else if ch == '?' { + // "?" is any char except "/" + regStr += "[^" + escSL + "]" + } else if ch == '.' || ch == '$' { + // Escape some regexp special chars that have no meaning + // in golang's filepath.Match + regStr += `\` + string(ch) + } else if ch == '\\' { + // escape next char. Note that a trailing \ in the pattern + // will be left alone (but need to escape it) + if sl == `\` { + // On windows map "\" to "\\", meaning an escaped backslash, + // and then just continue because filepath.Match on + // Windows doesn't allow escaping at all + regStr += escSL + continue + } + if scan.Peek() != scanner.EOF { + regStr += `\` + string(scan.Next()) + } else { + regStr += `\` + } + } else { + regStr += string(ch) + } + } + + regStr += "$" + + re, err := regexp.Compile(regStr) + if err != nil { + return err + } + + p.regexp = re + return nil +} + +// Matches returns true if file matches any of the patterns +// and isn't excluded by any of the subsequent patterns. +func Matches(file string, patterns []string) (bool, error) { + pm, err := NewPatternMatcher(patterns) + if err != nil { + return false, err + } + file = filepath.Clean(file) + + if file == "." { + // Don't let them exclude everything, kind of silly. + return false, nil + } + + return pm.Matches(file) +} + +// CopyFile copies from src to dst until either EOF is reached +// on src or an error occurs. It verifies src exists and removes +// the dst if it exists.
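As a reviewing aid, here is a minimal sketch of how the Matches helper just above is meant to be consumed (illustrative only; the patterns and file names are invented, and the import path is the vendored one introduced by this change):

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/fileutils"
)

func main() {
	// "!" negates a match, as in .dockerignore-style pattern lists.
	patterns := []string{"vendor/**", "!vendor/keep/*.go"}

	for _, f := range []string{"vendor/a/a.go", "vendor/keep/b.go", "main.go"} {
		excluded, err := fileutils.Matches(f, patterns)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%-18s excluded=%v\n", f, excluded)
	}
}

Here "vendor/a/a.go" is excluded, "vendor/keep/b.go" is rescued by the negated pattern, and "main.go" never matches.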
+func CopyFile(src, dst string) (int64, error) { + cleanSrc := filepath.Clean(src) + cleanDst := filepath.Clean(dst) + if cleanSrc == cleanDst { + return 0, nil + } + sf, err := os.Open(cleanSrc) + if err != nil { + return 0, err + } + defer sf.Close() + if err := os.Remove(cleanDst); err != nil && !os.IsNotExist(err) { + return 0, err + } + df, err := os.Create(cleanDst) + if err != nil { + return 0, err + } + defer df.Close() + return io.Copy(df, sf) +} + +// ReadSymlinkedDirectory returns the target directory of a symlink. +// The target of the symbolic link may not be a file. +func ReadSymlinkedDirectory(path string) (string, error) { + var realPath string + var err error + if realPath, err = filepath.Abs(path); err != nil { + return "", fmt.Errorf("unable to get absolute path for %s: %s", path, err) + } + if realPath, err = filepath.EvalSymlinks(realPath); err != nil { + return "", fmt.Errorf("failed to canonicalise path for %s: %s", path, err) + } + realPathInfo, err := os.Stat(realPath) + if err != nil { + return "", fmt.Errorf("failed to stat target '%s' of '%s': %s", realPath, path, err) + } + if !realPathInfo.Mode().IsDir() { + return "", fmt.Errorf("canonical path points to a file '%s'", realPath) + } + return realPath, nil +} + +// CreateIfNotExists creates a file or a directory only if it does not already exist. +func CreateIfNotExists(path string, isDir bool) error { + if _, err := os.Stat(path); err != nil { + if os.IsNotExist(err) { + if isDir { + return os.MkdirAll(path, 0755) + } + if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { + return err + } + f, err := os.OpenFile(path, os.O_CREATE, 0755) + if err != nil { + return err + } + f.Close() + } + } + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/fileutils/fileutils_darwin.go b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_darwin.go new file mode 100644 index 0000000..ccd648f --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_darwin.go @@ -0,0 +1,27 @@ +package fileutils + +import ( + "os" + "os/exec" + "strconv" + "strings" +) + +// GetTotalUsedFds returns the number of used File Descriptors by +// executing `lsof -p PID` +func GetTotalUsedFds() int { + pid := os.Getpid() + + cmd := exec.Command("lsof", "-p", strconv.Itoa(pid)) + + output, err := cmd.CombinedOutput() + if err != nil { + return -1 + } + + outputStr := strings.TrimSpace(string(output)) + + fds := strings.Split(outputStr, "\n") + + return len(fds) - 1 +} diff --git a/vendor/github.com/containers/storage/pkg/fileutils/fileutils_solaris.go b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_solaris.go new file mode 100644 index 0000000..0f2cb7a --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_solaris.go @@ -0,0 +1,7 @@ +package fileutils + +// GetTotalUsedFds Returns the number of used File Descriptors. +// On Solaris these limits are per process and not systemwide +func GetTotalUsedFds() int { + return -1 +} diff --git a/vendor/github.com/containers/storage/pkg/fileutils/fileutils_unix.go b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_unix.go new file mode 100644 index 0000000..9e0e97b --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_unix.go @@ -0,0 +1,22 @@ +// +build linux freebsd + +package fileutils + +import ( + "fmt" + "io/ioutil" + "os" + + "github.com/sirupsen/logrus" +) + +// GetTotalUsedFds Returns the number of used File Descriptors by +// reading it via /proc filesystem. 
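A short usage sketch for the copy and create helpers defined above; the /tmp paths and payload are placeholders:

package main

import (
	"fmt"
	"io/ioutil"

	"github.com/containers/storage/pkg/fileutils"
)

func main() {
	src := "/tmp/fileutils-src.txt"
	if err := ioutil.WriteFile(src, []byte("hello\n"), 0644); err != nil {
		panic(err)
	}

	// CreateIfNotExists is a no-op when the path already exists.
	if err := fileutils.CreateIfNotExists("/tmp/fileutils-dir", true); err != nil {
		panic(err)
	}

	// CopyFile removes an existing destination before copying and
	// returns the number of bytes copied.
	n, err := fileutils.CopyFile(src, "/tmp/fileutils-dst.txt")
	if err != nil {
		panic(err)
	}
	fmt.Println("copied bytes:", n)
}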
+func GetTotalUsedFds() int { + if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil { + logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err) + } else { + return len(fds) + } + return -1 +} diff --git a/vendor/github.com/containers/storage/pkg/fileutils/fileutils_windows.go b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_windows.go new file mode 100644 index 0000000..5ec21ca --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_windows.go @@ -0,0 +1,7 @@ +package fileutils + +// GetTotalUsedFds Returns the number of used File Descriptors. Not supported +// on Windows. +func GetTotalUsedFds() int { + return -1 +} diff --git a/vendor/github.com/containers/storage/pkg/fsutils/fsutils_linux.go b/vendor/github.com/containers/storage/pkg/fsutils/fsutils_linux.go new file mode 100644 index 0000000..e6094b5 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/fsutils/fsutils_linux.go @@ -0,0 +1,88 @@ +// +build linux + +package fsutils + +import ( + "fmt" + "io/ioutil" + "os" + "unsafe" + + "golang.org/x/sys/unix" +) + +func locateDummyIfEmpty(path string) (string, error) { + children, err := ioutil.ReadDir(path) + if err != nil { + return "", err + } + if len(children) != 0 { + return "", nil + } + dummyFile, err := ioutil.TempFile(path, "fsutils-dummy") + if err != nil { + return "", err + } + name := dummyFile.Name() + err = dummyFile.Close() + return name, err +} + +// SupportsDType returns whether the filesystem mounted on path supports d_type +func SupportsDType(path string) (bool, error) { + // locate dummy so that we have at least one dirent + dummy, err := locateDummyIfEmpty(path) + if err != nil { + return false, err + } + if dummy != "" { + defer os.Remove(dummy) + } + + visited := 0 + supportsDType := true + fn := func(ent *unix.Dirent) bool { + visited++ + if ent.Type == unix.DT_UNKNOWN { + supportsDType = false + // stop iteration + return true + } + // continue iteration + return false + } + if err = iterateReadDir(path, fn); err != nil { + return false, err + } + if visited == 0 { + return false, fmt.Errorf("did not hit any dirent during iteration %s", path) + } + return supportsDType, nil +} + +func iterateReadDir(path string, fn func(*unix.Dirent) bool) error { + d, err := os.Open(path) + if err != nil { + return err + } + defer d.Close() + fd := int(d.Fd()) + buf := make([]byte, 4096) + for { + nbytes, err := unix.ReadDirent(fd, buf) + if err != nil { + return err + } + if nbytes == 0 { + break + } + for off := 0; off < nbytes; { + ent := (*unix.Dirent)(unsafe.Pointer(&buf[off])) + if stop := fn(ent); stop { + return nil + } + off += int(ent.Reclen) + } + } + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_linux.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_linux.go new file mode 100644 index 0000000..c001fbe --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_linux.go @@ -0,0 +1,23 @@ +// +build linux + +package homedir + +import ( + "os" + + "github.com/containers/storage/pkg/idtools" +) + +// GetStatic returns the home directory for the current user without calling +// os/user.Current(). This is useful for static-linked binary on glibc-based +// system, because a call to os/user.Current() in a static binary leads to +// segfault due to a glibc issue that won't be fixed in a short term. 
+// (#29344, golang/go#13470, https://sourceware.org/bugzilla/show_bug.cgi?id=19341) +func GetStatic() (string, error) { + uid := os.Getuid() + usr, err := idtools.LookupUID(uid) + if err != nil { + return "", err + } + return usr.Home, nil +} diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go new file mode 100644 index 0000000..6b96b85 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go @@ -0,0 +1,13 @@ +// +build !linux + +package homedir + +import ( + "errors" +) + +// GetStatic is not needed for non-linux systems. +// (Precisely, it is needed only for glibc-based linux systems.) +func GetStatic() (string, error) { + return "", errors.New("homedir.GetStatic() is not supported on this system") +} diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go new file mode 100644 index 0000000..f2a20ea --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go @@ -0,0 +1,34 @@ +// +build !windows + +package homedir + +import ( + "os" + + "github.com/opencontainers/runc/libcontainer/user" +) + +// Key returns the env var name for the user's home dir based on +// the platform being run on +func Key() string { + return "HOME" +} + +// Get returns the home directory of the current user with the help of +// environment variables depending on the target operating system. +// Returned path should be used with "path/filepath" to form new paths. +func Get() string { + home := os.Getenv(Key()) + if home == "" { + if u, err := user.CurrentUser(); err == nil { + return u.Home + } + } + return home +} + +// GetShortcutString returns the string that is shortcut to user's home directory +// in the native shell of the platform running on. +func GetShortcutString() string { + return "~" +} diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go new file mode 100644 index 0000000..fafdb2b --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go @@ -0,0 +1,24 @@ +package homedir + +import ( + "os" +) + +// Key returns the env var name for the user's home dir based on +// the platform being run on +func Key() string { + return "USERPROFILE" +} + +// Get returns the home directory of the current user with the help of +// environment variables depending on the target operating system. +// Returned path should be used with "path/filepath" to form new paths. +func Get() string { + return os.Getenv(Key()) +} + +// GetShortcutString returns the string that is shortcut to user's home directory +// in the native shell of the platform running on. +func GetShortcutString() string { + return "%USERPROFILE%" // be careful while using in format functions +} diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools.go b/vendor/github.com/containers/storage/pkg/idtools/idtools.go new file mode 100644 index 0000000..8155893 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/idtools/idtools.go @@ -0,0 +1,297 @@ +package idtools + +import ( + "bufio" + "fmt" + "os" + "sort" + "strconv" + "strings" + "syscall" + + "github.com/pkg/errors" +) + +// IDMap contains a single entry for user namespace range remapping. An array +// of IDMap entries represents the structure that will be provided to the Linux +// kernel for creating a user namespace. 
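Before the idtools types, a quick illustration of the homedir helpers vendored above (illustrative; the joined ".config" path is just an example):

package main

import (
	"fmt"
	"path/filepath"

	"github.com/containers/storage/pkg/homedir"
)

func main() {
	// Get resolves via $HOME on Unix (or %USERPROFILE% on Windows),
	// falling back to the passwd database when the variable is unset.
	home := homedir.Get()
	fmt.Println("home:", home)
	fmt.Println("env key:", homedir.Key())
	fmt.Println("shortcut:", homedir.GetShortcutString())

	// Returned paths are meant to be combined with path/filepath.
	fmt.Println(filepath.Join(home, ".config"))
}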
+type IDMap struct { + ContainerID int `json:"container_id"` + HostID int `json:"host_id"` + Size int `json:"size"` +} + +type subIDRange struct { + Start int + Length int +} + +type ranges []subIDRange + +func (e ranges) Len() int { return len(e) } +func (e ranges) Swap(i, j int) { e[i], e[j] = e[j], e[i] } +func (e ranges) Less(i, j int) bool { return e[i].Start < e[j].Start } + +const ( + subuidFileName string = "/etc/subuid" + subgidFileName string = "/etc/subgid" +) + +// MkdirAllAs creates a directory (include any along the path) and then modifies +// ownership to the requested uid/gid. If the directory already exists, this +// function will still change ownership to the requested uid/gid pair. +// Deprecated: Use MkdirAllAndChown +func MkdirAllAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { + return mkdirAs(path, mode, ownerUID, ownerGID, true, true) +} + +// MkdirAs creates a directory and then modifies ownership to the requested uid/gid. +// If the directory already exists, this function still changes ownership +// Deprecated: Use MkdirAndChown with a IDPair +func MkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { + return mkdirAs(path, mode, ownerUID, ownerGID, false, true) +} + +// MkdirAllAndChown creates a directory (include any along the path) and then modifies +// ownership to the requested uid/gid. If the directory already exists, this +// function will still change ownership to the requested uid/gid pair. +func MkdirAllAndChown(path string, mode os.FileMode, ids IDPair) error { + return mkdirAs(path, mode, ids.UID, ids.GID, true, true) +} + +// MkdirAndChown creates a directory and then modifies ownership to the requested uid/gid. +// If the directory already exists, this function still changes ownership +func MkdirAndChown(path string, mode os.FileMode, ids IDPair) error { + return mkdirAs(path, mode, ids.UID, ids.GID, false, true) +} + +// MkdirAllAndChownNew creates a directory (include any along the path) and then modifies +// ownership ONLY of newly created directories to the requested uid/gid. If the +// directories along the path exist, no change of ownership will be performed +func MkdirAllAndChownNew(path string, mode os.FileMode, ids IDPair) error { + return mkdirAs(path, mode, ids.UID, ids.GID, true, false) +} + +// GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps. +// If the maps are empty, then the root uid/gid will default to "real" 0/0 +func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) { + uid, err := toHost(0, uidMap) + if err != nil { + return -1, -1, err + } + gid, err := toHost(0, gidMap) + if err != nil { + return -1, -1, err + } + return uid, gid, nil +} + +// toContainer takes an id mapping, and uses it to translate a +// host ID to the remapped ID. If no map is provided, then the translation +// assumes a 1-to-1 mapping and returns the passed in id +func toContainer(hostID int, idMap []IDMap) (int, error) { + if idMap == nil { + return hostID, nil + } + for _, m := range idMap { + if (hostID >= m.HostID) && (hostID <= (m.HostID + m.Size - 1)) { + contID := m.ContainerID + (hostID - m.HostID) + return contID, nil + } + } + return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID) +} + +// toHost takes an id mapping and a remapped ID, and translates the +// ID to the mapped host ID. 
If no map is provided, then the translation +// assumes a 1-to-1 mapping and returns the passed in id +func toHost(contID int, idMap []IDMap) (int, error) { + if idMap == nil { + return contID, nil + } + for _, m := range idMap { + if (contID >= m.ContainerID) && (contID <= (m.ContainerID + m.Size - 1)) { + hostID := m.HostID + (contID - m.ContainerID) + return hostID, nil + } + } + return -1, fmt.Errorf("Container ID %d cannot be mapped to a host ID", contID) +} + +// IDPair is a UID and GID pair +type IDPair struct { + UID int + GID int +} + +// IDMappings contains the mappings of UIDs and GIDs +type IDMappings struct { + uids []IDMap + gids []IDMap +} + +// NewIDMappings takes a requested user and group name and +// using the data from /etc/sub{uid,gid} ranges, creates the +// proper uid and gid remapping ranges for that user/group pair +func NewIDMappings(username, groupname string) (*IDMappings, error) { + subuidRanges, err := parseSubuid(username) + if err != nil { + return nil, err + } + subgidRanges, err := parseSubgid(groupname) + if err != nil { + return nil, err + } + if len(subuidRanges) == 0 { + return nil, fmt.Errorf("No subuid ranges found for user %q in %s", username, subuidFileName) + } + if len(subgidRanges) == 0 { + return nil, fmt.Errorf("No subgid ranges found for group %q in %s", groupname, subgidFileName) + } + + return &IDMappings{ + uids: createIDMap(subuidRanges), + gids: createIDMap(subgidRanges), + }, nil +} + +// NewIDMappingsFromMaps creates a new mapping from two slices +// Deprecated: this is a temporary shim while transitioning to IDMapping +func NewIDMappingsFromMaps(uids []IDMap, gids []IDMap) *IDMappings { + return &IDMappings{uids: uids, gids: gids} +} + +// RootPair returns a uid and gid pair for the root user. The error is ignored +// because a root user always exists, and the defaults are correct when the uid +// and gid maps are empty. +func (i *IDMappings) RootPair() IDPair { + uid, gid, _ := GetRootUIDGID(i.uids, i.gids) + return IDPair{UID: uid, GID: gid} +} + +// ToHost returns the host UID and GID for the container uid, gid.
+// Remapping is only performed if the ids aren't already the remapped root ids +func (i *IDMappings) ToHost(pair IDPair) (IDPair, error) { + var err error + target := i.RootPair() + + if pair.UID != target.UID { + target.UID, err = toHost(pair.UID, i.uids) + if err != nil { + return target, err + } + } + + if pair.GID != target.GID { + target.GID, err = toHost(pair.GID, i.gids) + } + return target, err +} + +// ToContainer returns the container UID and GID for the host uid and gid +func (i *IDMappings) ToContainer(pair IDPair) (int, int, error) { + uid, err := toContainer(pair.UID, i.uids) + if err != nil { + return -1, -1, err + } + gid, err := toContainer(pair.GID, i.gids) + return uid, gid, err +} + +// Empty returns true if there are no id mappings +func (i *IDMappings) Empty() bool { + return len(i.uids) == 0 && len(i.gids) == 0 +} + +// UIDs returns the UID mapping +// TODO: remove this once everything has been refactored to use pairs +func (i *IDMappings) UIDs() []IDMap { + return i.uids +} + +// GIDs returns the GID mapping +// TODO: remove this once everything has been refactored to use pairs +func (i *IDMappings) GIDs() []IDMap { + return i.gids +} + +func createIDMap(subidRanges ranges) []IDMap { + idMap := []IDMap{} + + // sort the ranges by lowest ID first + sort.Sort(subidRanges) + containerID := 0 + for _, idrange := range subidRanges { + idMap = append(idMap, IDMap{ + ContainerID: containerID, + HostID: idrange.Start, + Size: idrange.Length, + }) + containerID = containerID + idrange.Length + } + return idMap +} + +func parseSubuid(username string) (ranges, error) { + return parseSubidFile(subuidFileName, username) +} + +func parseSubgid(username string) (ranges, error) { + return parseSubidFile(subgidFileName, username) +} + +// parseSubidFile will read the appropriate file (/etc/subuid or /etc/subgid) +// and return all found ranges for a specified username.
If the special value +// "ALL" is supplied for username, then all ranges in the file will be returned +func parseSubidFile(path, username string) (ranges, error) { + var rangeList ranges + + subidFile, err := os.Open(path) + if err != nil { + return rangeList, err + } + defer subidFile.Close() + + s := bufio.NewScanner(subidFile) + for s.Scan() { + if err := s.Err(); err != nil { + return rangeList, err + } + + text := strings.TrimSpace(s.Text()) + if text == "" || strings.HasPrefix(text, "#") { + continue + } + parts := strings.Split(text, ":") + if len(parts) != 3 { + return rangeList, fmt.Errorf("Cannot parse subuid/gid information: Format not correct for %s file", path) + } + if parts[0] == username || username == "ALL" { + startid, err := strconv.Atoi(parts[1]) + if err != nil { + return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err) + } + length, err := strconv.Atoi(parts[2]) + if err != nil { + return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err) + } + rangeList = append(rangeList, subIDRange{startid, length}) + } + } + return rangeList, nil +} + +func checkChownErr(err error, name string, uid, gid int) error { + if e, ok := err.(*os.PathError); ok && e.Err == syscall.EINVAL { + return errors.Wrapf(err, "there might not be enough IDs available in the namespace (requested %d:%d for %s)", uid, gid, name) + } + return err +} + +func SafeChown(name string, uid, gid int) error { + return checkChownErr(os.Chown(name, uid, gid), name, uid, gid) +} + +func SafeLchown(name string, uid, gid int) error { + return checkChownErr(os.Lchown(name, uid, gid), name, uid, gid) +} diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go new file mode 100644 index 0000000..bdbdf1b --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go @@ -0,0 +1,204 @@ +// +build !windows + +package idtools + +import ( + "bytes" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "sync" + + "github.com/containers/storage/pkg/system" + "github.com/opencontainers/runc/libcontainer/user" +) + +var ( + entOnce sync.Once + getentCmd string +) + +func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error { + // make an array containing the original path asked for, plus (for mkAll == true) + // all path components leading up to the complete path that don't exist before we MkdirAll + // so that we can chown all of them properly at the end. 
If chownExisting is false, we won't + // chown the full directory path if it exists + var paths []string + if _, err := os.Stat(path); err != nil && os.IsNotExist(err) { + paths = []string{path} + } else if err == nil && chownExisting { + // short-circuit--we were called with an existing directory and chown was requested + return SafeChown(path, ownerUID, ownerGID) + } else if err == nil { + // nothing to do; directory path fully exists already and chown was NOT requested + return nil + } + + if mkAll { + // walk back to "/" looking for directories which do not exist + // and add them to the paths array for chown after creation + dirPath := path + for { + dirPath = filepath.Dir(dirPath) + if dirPath == "/" { + break + } + if _, err := os.Stat(dirPath); err != nil && os.IsNotExist(err) { + paths = append(paths, dirPath) + } + } + if err := system.MkdirAll(path, mode, ""); err != nil && !os.IsExist(err) { + return err + } + } else { + if err := os.Mkdir(path, mode); err != nil && !os.IsExist(err) { + return err + } + } + // even if it existed, we will chown the requested path + any subpaths that + // didn't exist when we called MkdirAll + for _, pathComponent := range paths { + if err := SafeChown(pathComponent, ownerUID, ownerGID); err != nil { + return err + } + } + return nil +} + +// CanAccess takes a valid (existing) directory and a uid, gid pair and determines +// if that uid, gid pair has access (execute bit) to the directory +func CanAccess(path string, pair IDPair) bool { + statInfo, err := system.Stat(path) + if err != nil { + return false + } + fileMode := os.FileMode(statInfo.Mode()) + permBits := fileMode.Perm() + return accessible(statInfo.UID() == uint32(pair.UID), + statInfo.GID() == uint32(pair.GID), permBits) +} + +func accessible(isOwner, isGroup bool, perms os.FileMode) bool { + if isOwner && (perms&0100 == 0100) { + return true + } + if isGroup && (perms&0010 == 0010) { + return true + } + if perms&0001 == 0001 { + return true + } + return false +} + +// LookupUser uses traditional local system files lookup (from libcontainer/user) on a username, +// followed by a call to `getent` for supporting host configured non-files passwd and group dbs +func LookupUser(username string) (user.User, error) { + // first try a local system files lookup using existing capabilities + usr, err := user.LookupUser(username) + if err == nil { + return usr, nil + } + // local files lookup failed; attempt to call `getent` to query configured passwd dbs + usr, err = getentUser(fmt.Sprintf("%s %s", "passwd", username)) + if err != nil { + return user.User{}, err + } + return usr, nil +} + +// LookupUID uses traditional local system files lookup (from libcontainer/user) on a uid, +// followed by a call to `getent` for supporting host configured non-files passwd and group dbs +func LookupUID(uid int) (user.User, error) { + // first try a local system files lookup using existing capabilities + usr, err := user.LookupUid(uid) + if err == nil { + return usr, nil + } + // local files lookup failed; attempt to call `getent` to query configured passwd dbs + return getentUser(fmt.Sprintf("%s %d", "passwd", uid)) +} + +func getentUser(args string) (user.User, error) { + reader, err := callGetent(args) + if err != nil { + return user.User{}, err + } + users, err := user.ParsePasswd(reader) + if err != nil { + return user.User{}, err + } + if len(users) == 0 { + return user.User{}, fmt.Errorf("getent failed to find passwd entry for %q", strings.Split(args, " ")[1]) + } + return users[0], nil +} + +// 
LookupGroup uses traditional local system files lookup (from libcontainer/user) on a group name, +// followed by a call to `getent` for supporting host configured non-files passwd and group dbs +func LookupGroup(groupname string) (user.Group, error) { + // first try a local system files lookup using existing capabilities + group, err := user.LookupGroup(groupname) + if err == nil { + return group, nil + } + // local files lookup failed; attempt to call `getent` to query configured group dbs + return getentGroup(fmt.Sprintf("%s %s", "group", groupname)) +} + +// LookupGID uses traditional local system files lookup (from libcontainer/user) on a group ID, +// followed by a call to `getent` for supporting host configured non-files passwd and group dbs +func LookupGID(gid int) (user.Group, error) { + // first try a local system files lookup using existing capabilities + group, err := user.LookupGid(gid) + if err == nil { + return group, nil + } + // local files lookup failed; attempt to call `getent` to query configured group dbs + return getentGroup(fmt.Sprintf("%s %d", "group", gid)) +} + +func getentGroup(args string) (user.Group, error) { + reader, err := callGetent(args) + if err != nil { + return user.Group{}, err + } + groups, err := user.ParseGroup(reader) + if err != nil { + return user.Group{}, err + } + if len(groups) == 0 { + return user.Group{}, fmt.Errorf("getent failed to find groups entry for %q", strings.Split(args, " ")[1]) + } + return groups[0], nil +} + +func callGetent(args string) (io.Reader, error) { + entOnce.Do(func() { getentCmd, _ = resolveBinary("getent") }) + // if no `getent` command on host, can't do anything else + if getentCmd == "" { + return nil, fmt.Errorf("") + } + out, err := execCmd(getentCmd, args) + if err != nil { + exitCode, errC := system.GetExitCode(err) + if errC != nil { + return nil, err + } + switch exitCode { + case 1: + return nil, fmt.Errorf("getent reported invalid parameters/database unknown") + case 2: + terms := strings.Split(args, " ") + return nil, fmt.Errorf("getent unable to find entry %q in %s database", terms[1], terms[0]) + case 3: + return nil, fmt.Errorf("getent database doesn't support enumeration") + default: + return nil, err + } + + } + return bytes.NewReader(out), nil +} diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go new file mode 100644 index 0000000..dbf6bc4 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go @@ -0,0 +1,25 @@ +// +build windows + +package idtools + +import ( + "os" + + "github.com/containers/storage/pkg/system" +) + +// Platforms such as Windows do not support the UID/GID concept. So make this +// just a wrapper around system.MkdirAll. 
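To tie the lookup and mkdir pieces of this package together, a hedged sketch of the intended call pattern ("root" and the /tmp path are arbitrary examples; on a real host the chown succeeds only with sufficient privileges):

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/idtools"
)

func main() {
	// Look up a user via /etc/passwd first, falling back to `getent`.
	usr, err := idtools.LookupUser("root")
	if err != nil {
		panic(err)
	}
	fmt.Printf("root: uid=%d gid=%d home=%s\n", usr.Uid, usr.Gid, usr.Home)

	// Create a directory tree owned by that uid/gid pair; directories
	// that already existed are also chowned by MkdirAllAndChown.
	pair := idtools.IDPair{UID: usr.Uid, GID: usr.Gid}
	if err := idtools.MkdirAllAndChown("/tmp/owned/by/root", 0700, pair); err != nil {
		panic(err)
	}
}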
+func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error { + if err := system.MkdirAll(path, mode, ""); err != nil && !os.IsExist(err) { + return err + } + return nil +} + +// CanAccess takes a valid (existing) directory and a uid, gid pair and determines +// if that uid, gid pair has access (execute bit) to the directory +// Windows does not require/support this function, so always return true +func CanAccess(path string, pair IDPair) bool { + return true +} diff --git a/vendor/github.com/containers/storage/pkg/idtools/parser.go b/vendor/github.com/containers/storage/pkg/idtools/parser.go new file mode 100644 index 0000000..86f98f1 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/idtools/parser.go @@ -0,0 +1,63 @@ +package idtools + +import ( + "fmt" + "math" + "math/bits" + "strconv" + "strings" +) + +func nonDigitsToWhitespace(r rune) rune { + if !strings.ContainsRune("0123456789", r) { + return ' ' + } + return r +} + +func parseTriple(spec []string) (container, host, size uint32, err error) { + cid, err := strconv.ParseUint(spec[0], 10, 32) + if err != nil { + return 0, 0, 0, fmt.Errorf("error parsing id map value %q: %v", spec[0], err) + } + hid, err := strconv.ParseUint(spec[1], 10, 32) + if err != nil { + return 0, 0, 0, fmt.Errorf("error parsing id map value %q: %v", spec[1], err) + } + sz, err := strconv.ParseUint(spec[2], 10, 32) + if err != nil { + return 0, 0, 0, fmt.Errorf("error parsing id map value %q: %v", spec[2], err) + } + return uint32(cid), uint32(hid), uint32(sz), nil +} + +// ParseIDMap parses idmap triples from string. +func ParseIDMap(mapSpec []string, mapSetting string) (idmap []IDMap, err error) { + stdErr := fmt.Errorf("error initializing ID mappings: %s setting is malformed", mapSetting) + for _, idMapSpec := range mapSpec { + idSpec := strings.Fields(strings.Map(nonDigitsToWhitespace, idMapSpec)) + if len(idSpec)%3 != 0 { + return nil, stdErr + } + for i := range idSpec { + if i%3 != 0 { + continue + } + cid, hid, size, err := parseTriple(idSpec[i : i+3]) + if err != nil { + return nil, stdErr + } + // Avoid possible integer overflow on 32bit builds + if bits.UintSize == 32 && (cid > math.MaxInt32 || hid > math.MaxInt32 || size > math.MaxInt32) { + return nil, stdErr + } + mapping := IDMap{ + ContainerID: int(cid), + HostID: int(hid), + Size: int(size), + } + idmap = append(idmap, mapping) + } + } + return idmap, nil +} diff --git a/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go b/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go new file mode 100644 index 0000000..9da7975 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go @@ -0,0 +1,164 @@ +package idtools + +import ( + "fmt" + "regexp" + "sort" + "strconv" + "strings" + "sync" +) + +// add a user and/or group to Linux /etc/passwd, /etc/group using standard +// Linux distribution commands: +// adduser --system --shell /bin/false --disabled-login --disabled-password --no-create-home --group +// useradd -r -s /bin/false + +var ( + once sync.Once + userCommand string + + cmdTemplates = map[string]string{ + "adduser": "--system --shell /bin/false --no-create-home --disabled-login --disabled-password --group %s", + "useradd": "-r -s /bin/false %s", + "usermod": "-%s %d-%d %s", + } + + idOutRegexp = regexp.MustCompile(`uid=([0-9]+).*gid=([0-9]+)`) + // default length for a UID/GID subordinate range + defaultRangeLen = 65536 + defaultRangeStart = 100000 + userMod = "usermod" 
+) + +// AddNamespaceRangesUser takes a username and uses the standard system +// utility to create a system user/group pair used to hold the +// /etc/sub{uid,gid} ranges which will be used for user namespace +// mapping ranges in containers. +func AddNamespaceRangesUser(name string) (int, int, error) { + if err := addUser(name); err != nil { + return -1, -1, fmt.Errorf("Error adding user %q: %v", name, err) + } + + // Query the system for the created uid and gid pair + out, err := execCmd("id", name) + if err != nil { + return -1, -1, fmt.Errorf("Error trying to find uid/gid for new user %q: %v", name, err) + } + matches := idOutRegexp.FindStringSubmatch(strings.TrimSpace(string(out))) + if len(matches) != 3 { + return -1, -1, fmt.Errorf("Can't find uid, gid from `id` output: %q", string(out)) + } + uid, err := strconv.Atoi(matches[1]) + if err != nil { + return -1, -1, fmt.Errorf("Can't convert found uid (%s) to int: %v", matches[1], err) + } + gid, err := strconv.Atoi(matches[2]) + if err != nil { + return -1, -1, fmt.Errorf("Can't convert found gid (%s) to int: %v", matches[2], err) + } + + // Now we need to create the subuid/subgid ranges for our new user/group (system users + // do not get auto-created ranges in subuid/subgid) + + if err := createSubordinateRanges(name); err != nil { + return -1, -1, fmt.Errorf("Couldn't create subordinate ID ranges: %v", err) + } + return uid, gid, nil +} + +func addUser(userName string) error { + once.Do(func() { + // set up which commands are used for adding users/groups dependent on distro + if _, err := resolveBinary("adduser"); err == nil { + userCommand = "adduser" + } else if _, err := resolveBinary("useradd"); err == nil { + userCommand = "useradd" + } + }) + if userCommand == "" { + return fmt.Errorf("Cannot add user; no useradd/adduser binary found") + } + args := fmt.Sprintf(cmdTemplates[userCommand], userName) + out, err := execCmd(userCommand, args) + if err != nil { + return fmt.Errorf("Failed to add user with error: %v; output: %q", err, string(out)) + } + return nil +} + +func createSubordinateRanges(name string) error { + + // first, we should verify that ranges weren't automatically created + // by the distro tooling + ranges, err := parseSubuid(name) + if err != nil { + return fmt.Errorf("Error while looking for subuid ranges for user %q: %v", name, err) + } + if len(ranges) == 0 { + // no UID ranges; let's create one + startID, err := findNextUIDRange() + if err != nil { + return fmt.Errorf("Can't find available subuid range: %v", err) + } + out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "v", startID, startID+defaultRangeLen-1, name)) + if err != nil { + return fmt.Errorf("Unable to add subuid range to user: %q; output: %s, err: %v", name, out, err) + } + } + + ranges, err = parseSubgid(name) + if err != nil { + return fmt.Errorf("Error while looking for subgid ranges for user %q: %v", name, err) + } + if len(ranges) == 0 { + // no GID ranges; let's create one + startID, err := findNextGIDRange() + if err != nil { + return fmt.Errorf("Can't find available subgid range: %v", err) + } + out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "w", startID, startID+defaultRangeLen-1, name)) + if err != nil { + return fmt.Errorf("Unable to add subgid range to user: %q; output: %s, err: %v", name, out, err) + } + } + return nil +} + +func findNextUIDRange() (int, error) { + ranges, err := parseSubuid("ALL") + if err != nil { + return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subuid file: %v", err) + } + 
sort.Sort(ranges) + return findNextRangeStart(ranges) +} + +func findNextGIDRange() (int, error) { + ranges, err := parseSubgid("ALL") + if err != nil { + return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subgid file: %v", err) + } + sort.Sort(ranges) + return findNextRangeStart(ranges) +} + +func findNextRangeStart(rangeList ranges) (int, error) { + startID := defaultRangeStart + for _, arange := range rangeList { + if wouldOverlap(arange, startID) { + startID = arange.Start + arange.Length + } + } + return startID, nil +} + +func wouldOverlap(arange subIDRange, ID int) bool { + low := ID + high := ID + defaultRangeLen + if (low >= arange.Start && low <= arange.Start+arange.Length) || + (high <= arange.Start+arange.Length && high >= arange.Start) { + return true + } + return false +} diff --git a/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_unsupported.go b/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_unsupported.go new file mode 100644 index 0000000..d98b354 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_unsupported.go @@ -0,0 +1,12 @@ +// +build !linux + +package idtools + +import "fmt" + +// AddNamespaceRangesUser takes a name and finds an unused uid, gid pair +// and calls the appropriate helper function to add the group and then +// the user to the group in /etc/group and /etc/passwd respectively. +func AddNamespaceRangesUser(name string) (int, int, error) { + return -1, -1, fmt.Errorf("No support for adding users or groups on this OS") +} diff --git a/vendor/github.com/containers/storage/pkg/idtools/utils_unix.go b/vendor/github.com/containers/storage/pkg/idtools/utils_unix.go new file mode 100644 index 0000000..9703ecb --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/idtools/utils_unix.go @@ -0,0 +1,32 @@ +// +build !windows + +package idtools + +import ( + "fmt" + "os/exec" + "path/filepath" + "strings" +) + +func resolveBinary(binname string) (string, error) { + binaryPath, err := exec.LookPath(binname) + if err != nil { + return "", err + } + resolvedPath, err := filepath.EvalSymlinks(binaryPath) + if err != nil { + return "", err + } + //only return no error if the final resolved binary basename + //matches what was searched for + if filepath.Base(resolvedPath) == binname { + return resolvedPath, nil + } + return "", fmt.Errorf("Binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath) +} + +func execCmd(cmd, args string) ([]byte, error) { + execCmd := exec.Command(cmd, strings.Split(args, " ")...) 
+ return execCmd.CombinedOutput() +} diff --git a/vendor/github.com/containers/storage/pkg/ioutils/buffer.go b/vendor/github.com/containers/storage/pkg/ioutils/buffer.go new file mode 100644 index 0000000..3d737b3 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/ioutils/buffer.go @@ -0,0 +1,51 @@ +package ioutils + +import ( + "errors" + "io" +) + +var errBufferFull = errors.New("buffer is full") + +type fixedBuffer struct { + buf []byte + pos int + lastRead int +} + +func (b *fixedBuffer) Write(p []byte) (int, error) { + n := copy(b.buf[b.pos:cap(b.buf)], p) + b.pos += n + + if n < len(p) { + if b.pos == cap(b.buf) { + return n, errBufferFull + } + return n, io.ErrShortWrite + } + return n, nil +} + +func (b *fixedBuffer) Read(p []byte) (int, error) { + n := copy(p, b.buf[b.lastRead:b.pos]) + b.lastRead += n + return n, nil +} + +func (b *fixedBuffer) Len() int { + return b.pos - b.lastRead +} + +func (b *fixedBuffer) Cap() int { + return cap(b.buf) +} + +func (b *fixedBuffer) Reset() { + b.pos = 0 + b.lastRead = 0 + b.buf = b.buf[:0] +} + +func (b *fixedBuffer) String() string { + return string(b.buf[b.lastRead:b.pos]) +} diff --git a/vendor/github.com/containers/storage/pkg/ioutils/bytespipe.go b/vendor/github.com/containers/storage/pkg/ioutils/bytespipe.go new file mode 100644 index 0000000..72a04f3 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/ioutils/bytespipe.go @@ -0,0 +1,186 @@ +package ioutils + +import ( + "errors" + "io" + "sync" +) + +// maxCap is the highest capacity to use in byte slices that buffer data. +const maxCap = 1e6 + +// minCap is the lowest capacity to use in byte slices that buffer data +const minCap = 64 + +// blockThreshold is the minimum number of bytes in the buffer which will cause +// a write to BytesPipe to block when allocating a new slice. +const blockThreshold = 1e6 + +var ( + // ErrClosed is returned when Write is called on a closed BytesPipe. + ErrClosed = errors.New("write to closed BytesPipe") + + bufPools = make(map[int]*sync.Pool) + bufPoolsLock sync.Mutex +) + +// BytesPipe is an io.ReadWriteCloser which works similarly to a pipe (queue). +// All written data may be read at most once. Also, BytesPipe allocates +// and releases new byte slices to adjust to current needs, so the buffer +// won't be overgrown after peak loads. +type BytesPipe struct { + mu sync.Mutex + wait *sync.Cond + buf []*fixedBuffer + bufLen int + closeErr error // error to return from next Read. set to nil if not closed. +} + +// NewBytesPipe creates a new BytesPipe. Its internal buffer starts with a +// capacity of 64 bytes; further slices are allocated and released as data +// is written and read. +func NewBytesPipe() *BytesPipe { + bp := &BytesPipe{} + bp.buf = append(bp.buf, getBuffer(minCap)) + bp.wait = sync.NewCond(&bp.mu) + return bp +} + +// Write writes p to BytesPipe. +// It can allocate new []byte slices in the process of writing.
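A minimal producer/consumer sketch of the BytesPipe API whose method bodies follow (illustrative only; error handling is reduced to panics):

package main

import (
	"fmt"
	"io/ioutil"

	"github.com/containers/storage/pkg/ioutils"
)

func main() {
	bp := ioutils.NewBytesPipe()

	// The writer buffers data internally; a concurrent reader drains it.
	go func() {
		bp.Write([]byte("hello "))
		bp.Write([]byte("world"))
		bp.Close() // readers see io.EOF once the buffer is drained
	}()

	data, err := ioutil.ReadAll(bp)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data)) // "hello world"
}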
+func (bp *BytesPipe) Write(p []byte) (int, error) { + bp.mu.Lock() + + written := 0 +loop0: + for { + if bp.closeErr != nil { + bp.mu.Unlock() + return written, ErrClosed + } + + if len(bp.buf) == 0 { + bp.buf = append(bp.buf, getBuffer(64)) + } + // get the last buffer + b := bp.buf[len(bp.buf)-1] + + n, err := b.Write(p) + written += n + bp.bufLen += n + + // errBufferFull is an error we expect to get if the buffer is full + if err != nil && err != errBufferFull { + bp.wait.Broadcast() + bp.mu.Unlock() + return written, err + } + + // if there was enough room to write all then break + if len(p) == n { + break + } + + // more data: write to the next slice + p = p[n:] + + // make sure the buffer doesn't grow too big from this write + for bp.bufLen >= blockThreshold { + bp.wait.Wait() + if bp.closeErr != nil { + continue loop0 + } + } + + // add new byte slice to the buffers slice and continue writing + nextCap := b.Cap() * 2 + if nextCap > maxCap { + nextCap = maxCap + } + bp.buf = append(bp.buf, getBuffer(nextCap)) + } + bp.wait.Broadcast() + bp.mu.Unlock() + return written, nil +} + +// CloseWithError causes further reads from a BytesPipe to return immediately. +func (bp *BytesPipe) CloseWithError(err error) error { + bp.mu.Lock() + if err != nil { + bp.closeErr = err + } else { + bp.closeErr = io.EOF + } + bp.wait.Broadcast() + bp.mu.Unlock() + return nil +} + +// Close causes further reads from a BytesPipe to return immediately. +func (bp *BytesPipe) Close() error { + return bp.CloseWithError(nil) +} + +// Read reads bytes from BytesPipe. +// Data could be read only once. +func (bp *BytesPipe) Read(p []byte) (n int, err error) { + bp.mu.Lock() + if bp.bufLen == 0 { + if bp.closeErr != nil { + bp.mu.Unlock() + return 0, bp.closeErr + } + bp.wait.Wait() + if bp.bufLen == 0 && bp.closeErr != nil { + err := bp.closeErr + bp.mu.Unlock() + return 0, err + } + } + + for bp.bufLen > 0 { + b := bp.buf[0] + read, _ := b.Read(p) // ignore error since fixedBuffer doesn't really return an error + n += read + bp.bufLen -= read + + if b.Len() == 0 { + // it's empty so return it to the pool and move to the next one + returnBuffer(b) + bp.buf[0] = nil + bp.buf = bp.buf[1:] + } + + if len(p) == read { + break + } + + p = p[read:] + } + + bp.wait.Broadcast() + bp.mu.Unlock() + return +} + +func returnBuffer(b *fixedBuffer) { + b.Reset() + bufPoolsLock.Lock() + pool := bufPools[b.Cap()] + bufPoolsLock.Unlock() + if pool != nil { + pool.Put(b) + } +} + +func getBuffer(size int) *fixedBuffer { + bufPoolsLock.Lock() + pool, ok := bufPools[size] + if !ok { + pool = &sync.Pool{New: func() interface{} { return &fixedBuffer{buf: make([]byte, 0, size)} }} + bufPools[size] = pool + } + bufPoolsLock.Unlock() + return pool.Get().(*fixedBuffer) +} diff --git a/vendor/github.com/containers/storage/pkg/ioutils/fswriters.go b/vendor/github.com/containers/storage/pkg/ioutils/fswriters.go new file mode 100644 index 0000000..a56c462 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/ioutils/fswriters.go @@ -0,0 +1,162 @@ +package ioutils + +import ( + "io" + "io/ioutil" + "os" + "path/filepath" +) + +// NewAtomicFileWriter returns WriteCloser so that writing to it writes to a +// temporary file and closing it atomically changes the temporary file to +// destination path. Writing and closing concurrently is not allowed. 
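For reference, the one-shot AtomicWriteFile helper defined below is usually all a caller needs; a sketch (the path and payload are invented):

package main

import (
	"github.com/containers/storage/pkg/ioutils"
)

func main() {
	// The data is staged in a temp file in the same directory and then
	// renamed over the target, so readers never observe a partial write.
	err := ioutils.AtomicWriteFile("/tmp/config.json", []byte(`{"ok":true}`), 0600)
	if err != nil {
		panic(err)
	}
}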
+func NewAtomicFileWriter(filename string, perm os.FileMode) (io.WriteCloser, error) { + f, err := ioutil.TempFile(filepath.Dir(filename), ".tmp-"+filepath.Base(filename)) + if err != nil { + return nil, err + } + + abspath, err := filepath.Abs(filename) + if err != nil { + return nil, err + } + return &atomicFileWriter{ + f: f, + fn: abspath, + perm: perm, + }, nil +} + +// AtomicWriteFile atomically writes data to a file named by filename. +func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error { + f, err := NewAtomicFileWriter(filename, perm) + if err != nil { + return err + } + n, err := f.Write(data) + if err == nil && n < len(data) { + err = io.ErrShortWrite + f.(*atomicFileWriter).writeErr = err + } + if err1 := f.Close(); err == nil { + err = err1 + } + return err +} + +type atomicFileWriter struct { + f *os.File + fn string + writeErr error + perm os.FileMode +} + +func (w *atomicFileWriter) Write(dt []byte) (int, error) { + n, err := w.f.Write(dt) + if err != nil { + w.writeErr = err + } + return n, err +} + +func (w *atomicFileWriter) Close() (retErr error) { + defer func() { + if retErr != nil || w.writeErr != nil { + os.Remove(w.f.Name()) + } + }() + if err := w.f.Sync(); err != nil { + w.f.Close() + return err + } + if err := w.f.Close(); err != nil { + return err + } + if err := os.Chmod(w.f.Name(), w.perm); err != nil { + return err + } + if w.writeErr == nil { + return os.Rename(w.f.Name(), w.fn) + } + return nil +} + +// AtomicWriteSet is used to atomically write a set +// of files and ensure they are visible at the same time. +// Must be committed to a new directory. +type AtomicWriteSet struct { + root string +} + +// NewAtomicWriteSet creates a new atomic write set to +// atomically create a set of files. The given directory +// is used as the base directory for storing files before +// commit. If no temporary directory is given the system +// default is used. +func NewAtomicWriteSet(tmpDir string) (*AtomicWriteSet, error) { + td, err := ioutil.TempDir(tmpDir, "write-set-") + if err != nil { + return nil, err + } + + return &AtomicWriteSet{ + root: td, + }, nil +} + +// WriteFile writes a file to the set, guaranteeing the file +// has been synced. +func (ws *AtomicWriteSet) WriteFile(filename string, data []byte, perm os.FileMode) error { + f, err := ws.FileWriter(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) + if err != nil { + return err + } + n, err := f.Write(data) + if err == nil && n < len(data) { + err = io.ErrShortWrite + } + if err1 := f.Close(); err == nil { + err = err1 + } + return err +} + +type syncFileCloser struct { + *os.File +} + +func (w syncFileCloser) Close() error { + err := w.File.Sync() + if err1 := w.File.Close(); err == nil { + err = err1 + } + return err +} + +// FileWriter opens a file writer inside the set. The file +// should be synced and closed before calling commit. +func (ws *AtomicWriteSet) FileWriter(name string, flag int, perm os.FileMode) (io.WriteCloser, error) { + f, err := os.OpenFile(filepath.Join(ws.root, name), flag, perm) + if err != nil { + return nil, err + } + return syncFileCloser{f}, nil +} + +// Cancel cancels the set and removes all temporary data +// created in the set. +func (ws *AtomicWriteSet) Cancel() error { + return os.RemoveAll(ws.root) +} + +// Commit moves all created files to the target directory. The +// target directory must not exist and the parent of the target +// directory must exist. 
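A short sketch of the whole-set workflow (stage, write, commit); the target path is a placeholder and, as the Commit comment above notes, must not exist beforehand:

package main

import (
	"github.com/containers/storage/pkg/ioutils"
)

func main() {
	ws, err := ioutils.NewAtomicWriteSet("") // "" uses the system temp dir
	if err != nil {
		panic(err)
	}

	if err := ws.WriteFile("a.txt", []byte("one"), 0644); err != nil {
		ws.Cancel()
		panic(err)
	}
	if err := ws.WriteFile("b.txt", []byte("two"), 0644); err != nil {
		ws.Cancel()
		panic(err)
	}

	// Commit renames the staging directory onto the target, making
	// both files visible at the same time.
	if err := ws.Commit("/tmp/write-set-demo"); err != nil {
		panic(err)
	}
}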
+func (ws *AtomicWriteSet) Commit(target string) error { + return os.Rename(ws.root, target) +} + +// String returns the location the set is writing to. +func (ws *AtomicWriteSet) String() string { + return ws.root +} diff --git a/vendor/github.com/containers/storage/pkg/ioutils/readers.go b/vendor/github.com/containers/storage/pkg/ioutils/readers.go new file mode 100644 index 0000000..63f3c07 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/ioutils/readers.go @@ -0,0 +1,154 @@ +package ioutils + +import ( + "crypto/sha256" + "encoding/hex" + "io" + + "golang.org/x/net/context" +) + +type readCloserWrapper struct { + io.Reader + closer func() error +} + +func (r *readCloserWrapper) Close() error { + return r.closer() +} + +// NewReadCloserWrapper returns a new io.ReadCloser. +func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser { + return &readCloserWrapper{ + Reader: r, + closer: closer, + } +} + +type readerErrWrapper struct { + reader io.Reader + closer func() +} + +func (r *readerErrWrapper) Read(p []byte) (int, error) { + n, err := r.reader.Read(p) + if err != nil { + r.closer() + } + return n, err +} + +// NewReaderErrWrapper returns a new io.Reader. +func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader { + return &readerErrWrapper{ + reader: r, + closer: closer, + } +} + +// HashData returns the sha256 sum of src. +func HashData(src io.Reader) (string, error) { + h := sha256.New() + if _, err := io.Copy(h, src); err != nil { + return "", err + } + return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil +} + +// OnEOFReader wraps an io.ReadCloser and a function +// the function will run at the end of file or close the file. +type OnEOFReader struct { + Rc io.ReadCloser + Fn func() +} + +func (r *OnEOFReader) Read(p []byte) (n int, err error) { + n, err = r.Rc.Read(p) + if err == io.EOF { + r.runFunc() + } + return +} + +// Close closes the file and run the function. +func (r *OnEOFReader) Close() error { + err := r.Rc.Close() + r.runFunc() + return err +} + +func (r *OnEOFReader) runFunc() { + if fn := r.Fn; fn != nil { + fn() + r.Fn = nil + } +} + +// cancelReadCloser wraps an io.ReadCloser with a context for cancelling read +// operations. +type cancelReadCloser struct { + cancel func() + pR *io.PipeReader // Stream to read from + pW *io.PipeWriter +} + +// NewCancelReadCloser creates a wrapper that closes the ReadCloser when the +// context is cancelled. The returned io.ReadCloser must be closed when it is +// no longer needed. +func NewCancelReadCloser(ctx context.Context, in io.ReadCloser) io.ReadCloser { + pR, pW := io.Pipe() + + // Create a context used to signal when the pipe is closed + doneCtx, cancel := context.WithCancel(context.Background()) + + p := &cancelReadCloser{ + cancel: cancel, + pR: pR, + pW: pW, + } + + go func() { + _, err := io.Copy(pW, in) + select { + case <-ctx.Done(): + // If the context was closed, p.closeWithError + // was already called. Calling it again would + // change the error that Read returns. + default: + p.closeWithError(err) + } + in.Close() + }() + go func() { + for { + select { + case <-ctx.Done(): + p.closeWithError(ctx.Err()) + case <-doneCtx.Done(): + return + } + } + }() + + return p +} + +// Read wraps the Read method of the pipe that provides data from the wrapped +// ReadCloser. +func (p *cancelReadCloser) Read(buf []byte) (n int, err error) { + return p.pR.Read(buf) +} + +// closeWithError closes the wrapper and its underlying reader. It will +// cause future calls to Read to return err. 
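`NewCancelReadCloser` is the piece that lets callers abandon a blocked read once a context expires. A usage sketch; `slowReader` is a stand-in for any reader that may stall:

```go
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"time"

	"github.com/containers/storage/pkg/ioutils"
	"golang.org/x/net/context"
)

// slowReader blocks long enough that the surrounding context deadline
// fires before any data is produced.
type slowReader struct{}

func (slowReader) Read(p []byte) (int, error) { time.Sleep(time.Minute); return 0, io.EOF }
func (slowReader) Close() error               { return nil }

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()

	rc := ioutils.NewCancelReadCloser(ctx, slowReader{})
	defer rc.Close()

	// The read unblocks when the deadline fires, even though slowReader
	// itself is still sleeping in its Read.
	_, err := ioutil.ReadAll(rc)
	fmt.Println(err) // context deadline exceeded
}
```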
+func (p *cancelReadCloser) closeWithError(err error) { + p.pW.CloseWithError(err) + p.cancel() +} + +// Close closes the wrapper its underlying reader. It will cause +// future calls to Read to return io.EOF. +func (p *cancelReadCloser) Close() error { + p.closeWithError(io.EOF) + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/ioutils/temp_unix.go b/vendor/github.com/containers/storage/pkg/ioutils/temp_unix.go new file mode 100644 index 0000000..1539ad2 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/ioutils/temp_unix.go @@ -0,0 +1,10 @@ +// +build !windows + +package ioutils + +import "io/ioutil" + +// TempDir on Unix systems is equivalent to ioutil.TempDir. +func TempDir(dir, prefix string) (string, error) { + return ioutil.TempDir(dir, prefix) +} diff --git a/vendor/github.com/containers/storage/pkg/ioutils/temp_windows.go b/vendor/github.com/containers/storage/pkg/ioutils/temp_windows.go new file mode 100644 index 0000000..c719c12 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/ioutils/temp_windows.go @@ -0,0 +1,18 @@ +// +build windows + +package ioutils + +import ( + "io/ioutil" + + "github.com/containers/storage/pkg/longpath" +) + +// TempDir is the equivalent of ioutil.TempDir, except that the result is in Windows longpath format. +func TempDir(dir, prefix string) (string, error) { + tempDir, err := ioutil.TempDir(dir, prefix) + if err != nil { + return "", err + } + return longpath.AddPrefix(tempDir), nil +} diff --git a/vendor/github.com/containers/storage/pkg/ioutils/writeflusher.go b/vendor/github.com/containers/storage/pkg/ioutils/writeflusher.go new file mode 100644 index 0000000..52a4901 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/ioutils/writeflusher.go @@ -0,0 +1,92 @@ +package ioutils + +import ( + "io" + "sync" +) + +// WriteFlusher wraps the Write and Flush operation ensuring that every write +// is a flush. In addition, the Close method can be called to intercept +// Read/Write calls if the targets lifecycle has already ended. +type WriteFlusher struct { + w io.Writer + flusher flusher + flushed chan struct{} + flushedOnce sync.Once + closed chan struct{} + closeLock sync.Mutex +} + +type flusher interface { + Flush() +} + +var errWriteFlusherClosed = io.EOF + +func (wf *WriteFlusher) Write(b []byte) (n int, err error) { + select { + case <-wf.closed: + return 0, errWriteFlusherClosed + default: + } + + n, err = wf.w.Write(b) + wf.Flush() // every write is a flush. + return n, err +} + +// Flush the stream immediately. +func (wf *WriteFlusher) Flush() { + select { + case <-wf.closed: + return + default: + } + + wf.flushedOnce.Do(func() { + close(wf.flushed) + }) + wf.flusher.Flush() +} + +// Flushed returns the state of flushed. +// If it's flushed, return true, or else it return false. +func (wf *WriteFlusher) Flushed() bool { + // BUG(stevvooe): Remove this method. Its use is inherently racy. Seems to + // be used to detect whether or a response code has been issued or not. + // Another hook should be used instead. + var flushed bool + select { + case <-wf.flushed: + flushed = true + default: + } + return flushed +} + +// Close closes the write flusher, disallowing any further writes to the +// target. After the flusher is closed, all calls to write or flush will +// result in an error. 
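`WriteFlusher` forwards every `Write` straight into a `Flush`, which is what keeps streamed output (e.g. chunked HTTP responses) moving. A small demo using the constructor that follows; `countingFlusher` is an illustrative type that satisfies the package's unexported `flusher` interface:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/containers/storage/pkg/ioutils"
)

// countingFlusher records how often Flush is called; embedding bytes.Buffer
// provides the Write method.
type countingFlusher struct {
	bytes.Buffer
	flushes int
}

func (c *countingFlusher) Flush() { c.flushes++ }

func main() {
	cf := &countingFlusher{}
	wf := ioutils.NewWriteFlusher(cf)

	wf.Write([]byte("chunk 1\n"))
	wf.Write([]byte("chunk 2\n"))
	fmt.Println(cf.flushes) // 2: every write is followed by a flush

	wf.Close()
	if _, err := wf.Write([]byte("late")); err != nil {
		fmt.Println("write after close:", err) // io.EOF
	}
}
```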
+func (wf *WriteFlusher) Close() error { + wf.closeLock.Lock() + defer wf.closeLock.Unlock() + + select { + case <-wf.closed: + return errWriteFlusherClosed + default: + close(wf.closed) + } + return nil +} + +// NewWriteFlusher returns a new WriteFlusher. +func NewWriteFlusher(w io.Writer) *WriteFlusher { + var fl flusher + if f, ok := w.(flusher); ok { + fl = f + } else { + fl = &NopFlusher{} + } + return &WriteFlusher{w: w, flusher: fl, closed: make(chan struct{}), flushed: make(chan struct{})} +} diff --git a/vendor/github.com/containers/storage/pkg/ioutils/writers.go b/vendor/github.com/containers/storage/pkg/ioutils/writers.go new file mode 100644 index 0000000..ccc7f9c --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/ioutils/writers.go @@ -0,0 +1,66 @@ +package ioutils + +import "io" + +// NopWriter represents a type which write operation is nop. +type NopWriter struct{} + +func (*NopWriter) Write(buf []byte) (int, error) { + return len(buf), nil +} + +type nopWriteCloser struct { + io.Writer +} + +func (w *nopWriteCloser) Close() error { return nil } + +// NopWriteCloser returns a nopWriteCloser. +func NopWriteCloser(w io.Writer) io.WriteCloser { + return &nopWriteCloser{w} +} + +// NopFlusher represents a type which flush operation is nop. +type NopFlusher struct{} + +// Flush is a nop operation. +func (f *NopFlusher) Flush() {} + +type writeCloserWrapper struct { + io.Writer + closer func() error +} + +func (r *writeCloserWrapper) Close() error { + return r.closer() +} + +// NewWriteCloserWrapper returns a new io.WriteCloser. +func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser { + return &writeCloserWrapper{ + Writer: r, + closer: closer, + } +} + +// WriteCounter wraps a concrete io.Writer and hold a count of the number +// of bytes written to the writer during a "session". +// This can be convenient when write return is masked +// (e.g., json.Encoder.Encode()) +type WriteCounter struct { + Count int64 + Writer io.Writer +} + +// NewWriteCounter returns a new WriteCounter. +func NewWriteCounter(w io.Writer) *WriteCounter { + return &WriteCounter{ + Writer: w, + } +} + +func (wc *WriteCounter) Write(p []byte) (count int, err error) { + count, err = wc.Writer.Write(p) + wc.Count += int64(count) + return +} diff --git a/vendor/github.com/containers/storage/pkg/locker/README.md b/vendor/github.com/containers/storage/pkg/locker/README.md new file mode 100644 index 0000000..ad15e89 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/locker/README.md @@ -0,0 +1,65 @@ +Locker +===== + +locker provides a mechanism for creating finer-grained locking to help +free up more global locks to handle other tasks. + +The implementation looks close to a sync.Mutex, however, the user must provide a +reference to use to refer to the underlying lock when locking and unlocking, +and unlock may generate an error. + +If a lock with a given name does not exist when `Lock` is called, one is +created. +Lock references are automatically cleaned up on `Unlock` if nothing else is +waiting for the lock. 
+
+
+## Usage
+
+```go
+package important
+
+import (
+	"sync"
+	"time"
+
+	"github.com/containers/storage/pkg/locker"
+)
+
+type important struct {
+	locks *locker.Locker
+	data  map[string]interface{}
+	mu    sync.Mutex
+}
+
+func (i *important) Get(name string) interface{} {
+	i.locks.Lock(name)
+	defer i.locks.Unlock(name)
+	return i.data[name]
+}
+
+func (i *important) Create(name string, data interface{}) {
+	i.locks.Lock(name)
+	defer i.locks.Unlock(name)
+
+	i.createImportant(data)
+
+	i.mu.Lock()
+	i.data[name] = data
+	i.mu.Unlock()
+}
+
+func (i *important) createImportant(data interface{}) {
+	time.Sleep(10 * time.Second)
+}
+```
+
+For functions dealing with a given name, always lock at the beginning of the
+function (or before doing anything with the underlying state); this ensures any
+other function that is dealing with the same name will block.
+
+When needing to modify the underlying data, use the global lock to ensure nothing
+else is modifying it at the same time.
+Since the name lock is already in place, no reads will occur while the modification
+is being performed.
+
diff --git a/vendor/github.com/containers/storage/pkg/locker/locker.go b/vendor/github.com/containers/storage/pkg/locker/locker.go
new file mode 100644
index 0000000..0b22ddf
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/locker/locker.go
@@ -0,0 +1,112 @@
+/*
+Package locker provides a mechanism for creating finer-grained locking to help
+free up more global locks to handle other tasks.
+
+The implementation looks close to a sync.Mutex, however the user must provide a
+reference to use to refer to the underlying lock when locking and unlocking,
+and unlock may generate an error.
+
+If a lock with a given name does not exist when `Lock` is called, one is
+created.
+Lock references are automatically cleaned up on `Unlock` if nothing else is
+waiting for the lock.
+*/
+package locker
+
+import (
+	"errors"
+	"sync"
+	"sync/atomic"
+)
+
+// ErrNoSuchLock is returned when the requested lock does not exist
+var ErrNoSuchLock = errors.New("no such lock")
+
+// Locker provides a locking mechanism based on the passed in reference name
+type Locker struct {
+	mu    sync.Mutex
+	locks map[string]*lockCtr
+}
+
+// lockCtr is used by Locker to represent a lock with a given name.
+type lockCtr struct {
+	mu sync.Mutex
+	// waiters is the number of waiters waiting to acquire the lock
+	// this is int32 instead of uint32 so we can add `-1` in `dec()`
+	waiters int32
+}
+
+// inc increments the number of waiters waiting for the lock
+func (l *lockCtr) inc() {
+	atomic.AddInt32(&l.waiters, 1)
+}
+
+// dec decrements the number of waiters waiting on the lock
+func (l *lockCtr) dec() {
+	atomic.AddInt32(&l.waiters, -1)
+}
+
+// count gets the current number of waiters
+func (l *lockCtr) count() int32 {
+	return atomic.LoadInt32(&l.waiters)
+}
+
+// Lock locks the mutex
+func (l *lockCtr) Lock() {
+	l.mu.Lock()
+}
+
+// Unlock unlocks the mutex
+func (l *lockCtr) Unlock() {
+	l.mu.Unlock()
+}
+
+// New creates a new Locker
+func New() *Locker {
+	return &Locker{
+		locks: make(map[string]*lockCtr),
+	}
+}
+
+// Lock locks a mutex with the given name. 
If it doesn't exist, one is created +func (l *Locker) Lock(name string) { + l.mu.Lock() + if l.locks == nil { + l.locks = make(map[string]*lockCtr) + } + + nameLock, exists := l.locks[name] + if !exists { + nameLock = &lockCtr{} + l.locks[name] = nameLock + } + + // increment the nameLock waiters while inside the main mutex + // this makes sure that the lock isn't deleted if `Lock` and `Unlock` are called concurrently + nameLock.inc() + l.mu.Unlock() + + // Lock the nameLock outside the main mutex so we don't block other operations + // once locked then we can decrement the number of waiters for this lock + nameLock.Lock() + nameLock.dec() +} + +// Unlock unlocks the mutex with the given name +// If the given lock is not being waited on by any other callers, it is deleted +func (l *Locker) Unlock(name string) error { + l.mu.Lock() + nameLock, exists := l.locks[name] + if !exists { + l.mu.Unlock() + return ErrNoSuchLock + } + + if nameLock.count() == 0 { + delete(l.locks, name) + } + nameLock.Unlock() + + l.mu.Unlock() + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/longpath/longpath.go b/vendor/github.com/containers/storage/pkg/longpath/longpath.go new file mode 100644 index 0000000..9b15bff --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/longpath/longpath.go @@ -0,0 +1,26 @@ +// longpath introduces some constants and helper functions for handling long paths +// in Windows, which are expected to be prepended with `\\?\` and followed by either +// a drive letter, a UNC server\share, or a volume identifier. + +package longpath + +import ( + "strings" +) + +// Prefix is the longpath prefix for Windows file paths. +const Prefix = `\\?\` + +// AddPrefix will add the Windows long path prefix to the path provided if +// it does not already have it. +func AddPrefix(path string) string { + if !strings.HasPrefix(path, Prefix) { + if strings.HasPrefix(path, `\\`) { + // This is a UNC path, so we need to add 'UNC' to the path as well. + path = Prefix + `UNC` + path[1:] + } else { + path = Prefix + path + } + } + return path +} diff --git a/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go b/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go new file mode 100644 index 0000000..34c8054 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go @@ -0,0 +1,157 @@ +// +build linux + +package loopback + +import ( + "errors" + "fmt" + "os" + "syscall" + + "github.com/sirupsen/logrus" +) + +// Loopback related errors +var ( + ErrAttachLoopbackDevice = errors.New("loopback attach failed") + ErrGetLoopbackBackingFile = errors.New("Unable to get loopback backing file") + ErrSetCapacity = errors.New("Unable set loopback capacity") +) + +func stringToLoopName(src string) [LoNameSize]uint8 { + var dst [LoNameSize]uint8 + copy(dst[:], src[:]) + return dst +} + +func getNextFreeLoopbackIndex() (int, error) { + f, err := os.OpenFile("/dev/loop-control", os.O_RDONLY, 0644) + if err != nil { + return 0, err + } + defer f.Close() + + index, err := ioctlLoopCtlGetFree(f.Fd()) + if index < 0 { + index = 0 + } + return index, err +} + +func openNextAvailableLoopback(index int, sparseName string, sparseFile *os.File) (loopFile *os.File, err error) { + // Read information about the loopback file. 
+ var st syscall.Stat_t + err = syscall.Fstat(int(sparseFile.Fd()), &st) + if err != nil { + logrus.Errorf("Error reading information about loopback file %s: %v", sparseName, err) + return nil, ErrAttachLoopbackDevice + } + + // Start looking for a free /dev/loop + for { + target := fmt.Sprintf("/dev/loop%d", index) + index++ + + fi, err := os.Stat(target) + if err != nil { + if os.IsNotExist(err) { + logrus.Error("There are no more loopback devices available.") + } + return nil, ErrAttachLoopbackDevice + } + + if fi.Mode()&os.ModeDevice != os.ModeDevice { + logrus.Errorf("Loopback device %s is not a block device.", target) + continue + } + + // OpenFile adds O_CLOEXEC + loopFile, err = os.OpenFile(target, os.O_RDWR, 0644) + if err != nil { + logrus.Errorf("Error opening loopback device: %s", err) + return nil, ErrAttachLoopbackDevice + } + + // Try to attach to the loop file + if err := ioctlLoopSetFd(loopFile.Fd(), sparseFile.Fd()); err != nil { + loopFile.Close() + + // If the error is EBUSY, then try the next loopback + if err != syscall.EBUSY { + logrus.Errorf("Cannot set up loopback device %s: %s", target, err) + return nil, ErrAttachLoopbackDevice + } + + // Otherwise, we keep going with the loop + continue + } + + // Check if the loopback driver and underlying filesystem agree on the loopback file's + // device and inode numbers. + dev, ino, err := getLoopbackBackingFile(loopFile) + if err != nil { + logrus.Errorf("Error getting loopback backing file: %s", err) + return nil, ErrGetLoopbackBackingFile + } + if dev != st.Dev || ino != st.Ino { + logrus.Errorf("Loopback device and filesystem disagree on device/inode for %q: %#x(%d):%#x(%d) vs %#x(%d):%#x(%d)", sparseName, dev, dev, ino, ino, st.Dev, st.Dev, st.Ino, st.Ino) + } + + // In case of success, we finished. Break the loop. + break + } + + // This can't happen, but let's be sure + if loopFile == nil { + logrus.Errorf("Unreachable code reached! Error attaching %s to a loopback device.", sparseFile.Name()) + return nil, ErrAttachLoopbackDevice + } + + return loopFile, nil +} + +// AttachLoopDevice attaches the given sparse file to the next +// available loopback device. It returns an opened *os.File. +func AttachLoopDevice(sparseName string) (loop *os.File, err error) { + + // Try to retrieve the next available loopback device via syscall. + // If it fails, we discard error and start looping for a + // loopback from index 0. 
+ startIndex, err := getNextFreeLoopbackIndex() + if err != nil { + logrus.Debugf("Error retrieving the next available loopback: %s", err) + } + + // OpenFile adds O_CLOEXEC + sparseFile, err := os.OpenFile(sparseName, os.O_RDWR, 0644) + if err != nil { + logrus.Errorf("Error opening sparse file %s: %s", sparseName, err) + return nil, ErrAttachLoopbackDevice + } + defer sparseFile.Close() + + loopFile, err := openNextAvailableLoopback(startIndex, sparseName, sparseFile) + if err != nil { + return nil, err + } + + // Set the status of the loopback device + loopInfo := &loopInfo64{ + loFileName: stringToLoopName(loopFile.Name()), + loOffset: 0, + loFlags: LoFlagsAutoClear, + } + + if err := ioctlLoopSetStatus64(loopFile.Fd(), loopInfo); err != nil { + logrus.Errorf("Cannot set up loopback device info: %s", err) + + // If the call failed, then free the loopback device + if err := ioctlLoopClrFd(loopFile.Fd()); err != nil { + logrus.Error("Error while cleaning up the loopback device") + } + loopFile.Close() + return nil, ErrAttachLoopbackDevice + } + + return loopFile, nil +} diff --git a/vendor/github.com/containers/storage/pkg/loopback/ioctl.go b/vendor/github.com/containers/storage/pkg/loopback/ioctl.go new file mode 100644 index 0000000..0714eb5 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/loopback/ioctl.go @@ -0,0 +1,53 @@ +// +build linux + +package loopback + +import ( + "syscall" + "unsafe" +) + +func ioctlLoopCtlGetFree(fd uintptr) (int, error) { + index, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, LoopCtlGetFree, 0) + if err != 0 { + return 0, err + } + return int(index), nil +} + +func ioctlLoopSetFd(loopFd, sparseFd uintptr) error { + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetFd, sparseFd); err != 0 { + return err + } + return nil +} + +func ioctlLoopSetStatus64(loopFd uintptr, loopInfo *loopInfo64) error { + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 { + return err + } + return nil +} + +func ioctlLoopClrFd(loopFd uintptr) error { + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopClrFd, 0); err != 0 { + return err + } + return nil +} + +func ioctlLoopGetStatus64(loopFd uintptr) (*loopInfo64, error) { + loopInfo := &loopInfo64{} + + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopGetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 { + return nil, err + } + return loopInfo, nil +} + +func ioctlLoopSetCapacity(loopFd uintptr, value int) error { + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetCapacity, uintptr(value)); err != 0 { + return err + } + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/loopback/loop_wrapper.go b/vendor/github.com/containers/storage/pkg/loopback/loop_wrapper.go new file mode 100644 index 0000000..e1100ce --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/loopback/loop_wrapper.go @@ -0,0 +1,52 @@ +// +build linux + +package loopback + +/* +#include // FIXME: present only for defines, maybe we can remove it? 
+ +#ifndef LOOP_CTL_GET_FREE + #define LOOP_CTL_GET_FREE 0x4C82 +#endif + +#ifndef LO_FLAGS_PARTSCAN + #define LO_FLAGS_PARTSCAN 8 +#endif + +*/ +import "C" + +type loopInfo64 struct { + loDevice uint64 /* ioctl r/o */ + loInode uint64 /* ioctl r/o */ + loRdevice uint64 /* ioctl r/o */ + loOffset uint64 + loSizelimit uint64 /* bytes, 0 == max available */ + loNumber uint32 /* ioctl r/o */ + loEncryptType uint32 + loEncryptKeySize uint32 /* ioctl w/o */ + loFlags uint32 /* ioctl r/o */ + loFileName [LoNameSize]uint8 + loCryptName [LoNameSize]uint8 + loEncryptKey [LoKeySize]uint8 /* ioctl w/o */ + loInit [2]uint64 +} + +// IOCTL consts +const ( + LoopSetFd = C.LOOP_SET_FD + LoopCtlGetFree = C.LOOP_CTL_GET_FREE + LoopGetStatus64 = C.LOOP_GET_STATUS64 + LoopSetStatus64 = C.LOOP_SET_STATUS64 + LoopClrFd = C.LOOP_CLR_FD + LoopSetCapacity = C.LOOP_SET_CAPACITY +) + +// LOOP consts. +const ( + LoFlagsAutoClear = C.LO_FLAGS_AUTOCLEAR + LoFlagsReadOnly = C.LO_FLAGS_READ_ONLY + LoFlagsPartScan = C.LO_FLAGS_PARTSCAN + LoKeySize = C.LO_KEY_SIZE + LoNameSize = C.LO_NAME_SIZE +) diff --git a/vendor/github.com/containers/storage/pkg/loopback/loopback.go b/vendor/github.com/containers/storage/pkg/loopback/loopback.go new file mode 100644 index 0000000..a8ec3c6 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/loopback/loopback.go @@ -0,0 +1,63 @@ +// +build linux + +package loopback + +import ( + "fmt" + "os" + "syscall" + + "github.com/sirupsen/logrus" +) + +func getLoopbackBackingFile(file *os.File) (uint64, uint64, error) { + loopInfo, err := ioctlLoopGetStatus64(file.Fd()) + if err != nil { + logrus.Errorf("Error get loopback backing file: %s", err) + return 0, 0, ErrGetLoopbackBackingFile + } + return loopInfo.loDevice, loopInfo.loInode, nil +} + +// SetCapacity reloads the size for the loopback device. +func SetCapacity(file *os.File) error { + if err := ioctlLoopSetCapacity(file.Fd(), 0); err != nil { + logrus.Errorf("Error loopbackSetCapacity: %s", err) + return ErrSetCapacity + } + return nil +} + +// FindLoopDeviceFor returns a loopback device file for the specified file which +// is backing file of a loop back device. 
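Putting the loopback pieces together: a usage sketch, assuming Linux, root privileges, and an illustrative backing-file path:

```go
// +build linux

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/containers/storage/pkg/loopback"
)

func main() {
	// Create a sparse backing file; attaching it requires root and a kernel
	// with loop device support. The path is illustrative.
	backing, err := os.Create("/var/tmp/sparse.img")
	if err != nil {
		log.Fatal(err)
	}
	if err := backing.Truncate(1 << 30); err != nil { // 1 GiB, allocated lazily
		log.Fatal(err)
	}
	backing.Close()

	loop, err := loopback.AttachLoopDevice("/var/tmp/sparse.img")
	if err != nil {
		log.Fatal(err)
	}
	defer loop.Close()
	fmt.Println("attached as", loop.Name()) // e.g. /dev/loop0

	// After growing the backing file, ask the kernel to re-read its size.
	if err := loopback.SetCapacity(loop); err != nil {
		log.Fatal(err)
	}
}
```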
+func FindLoopDeviceFor(file *os.File) *os.File { + stat, err := file.Stat() + if err != nil { + return nil + } + targetInode := stat.Sys().(*syscall.Stat_t).Ino + targetDevice := stat.Sys().(*syscall.Stat_t).Dev + + for i := 0; true; i++ { + path := fmt.Sprintf("/dev/loop%d", i) + + file, err := os.OpenFile(path, os.O_RDWR, 0) + if err != nil { + if os.IsNotExist(err) { + return nil + } + + // Ignore all errors until the first not-exist + // we want to continue looking for the file + continue + } + + dev, inode, err := getLoopbackBackingFile(file) + if err == nil && dev == targetDevice && inode == targetInode { + return file + } + file.Close() + } + + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/mount/flags.go b/vendor/github.com/containers/storage/pkg/mount/flags.go new file mode 100644 index 0000000..07a0f48 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mount/flags.go @@ -0,0 +1,149 @@ +package mount + +import ( + "fmt" + "strings" +) + +var flags = map[string]struct { + clear bool + flag int +}{ + "defaults": {false, 0}, + "ro": {false, RDONLY}, + "rw": {true, RDONLY}, + "suid": {true, NOSUID}, + "nosuid": {false, NOSUID}, + "dev": {true, NODEV}, + "nodev": {false, NODEV}, + "exec": {true, NOEXEC}, + "noexec": {false, NOEXEC}, + "sync": {false, SYNCHRONOUS}, + "async": {true, SYNCHRONOUS}, + "dirsync": {false, DIRSYNC}, + "remount": {false, REMOUNT}, + "mand": {false, MANDLOCK}, + "nomand": {true, MANDLOCK}, + "atime": {true, NOATIME}, + "noatime": {false, NOATIME}, + "diratime": {true, NODIRATIME}, + "nodiratime": {false, NODIRATIME}, + "bind": {false, BIND}, + "rbind": {false, RBIND}, + "unbindable": {false, UNBINDABLE}, + "runbindable": {false, RUNBINDABLE}, + "private": {false, PRIVATE}, + "rprivate": {false, RPRIVATE}, + "shared": {false, SHARED}, + "rshared": {false, RSHARED}, + "slave": {false, SLAVE}, + "rslave": {false, RSLAVE}, + "relatime": {false, RELATIME}, + "norelatime": {true, RELATIME}, + "strictatime": {false, STRICTATIME}, + "nostrictatime": {true, STRICTATIME}, +} + +var validFlags = map[string]bool{ + "": true, + "size": true, + "mode": true, + "uid": true, + "gid": true, + "nr_inodes": true, + "nr_blocks": true, + "mpol": true, +} + +var propagationFlags = map[string]bool{ + "bind": true, + "rbind": true, + "unbindable": true, + "runbindable": true, + "private": true, + "rprivate": true, + "shared": true, + "rshared": true, + "slave": true, + "rslave": true, +} + +// MergeTmpfsOptions merge mount options to make sure there is no duplicate. +func MergeTmpfsOptions(options []string) ([]string, error) { + // We use collisions maps to remove duplicates. + // For flag, the key is the flag value (the key for propagation flag is -1) + // For data=value, the key is the data + flagCollisions := map[int]bool{} + dataCollisions := map[string]bool{} + + var newOptions []string + // We process in reverse order + for i := len(options) - 1; i >= 0; i-- { + option := options[i] + if option == "defaults" { + continue + } + if f, ok := flags[option]; ok && f.flag != 0 { + // There is only one propagation mode + key := f.flag + if propagationFlags[option] { + key = -1 + } + // Check to see if there is collision for flag + if !flagCollisions[key] { + // We prepend the option and add to collision map + newOptions = append([]string{option}, newOptions...) 
+ flagCollisions[key] = true + } + continue + } + opt := strings.SplitN(option, "=", 2) + if len(opt) != 2 || !validFlags[opt[0]] { + return nil, fmt.Errorf("Invalid tmpfs option %q", opt) + } + if !dataCollisions[opt[0]] { + // We prepend the option and add to collision map + newOptions = append([]string{option}, newOptions...) + dataCollisions[opt[0]] = true + } + } + + return newOptions, nil +} + +// ParseOptions parses fstab type mount options into mount() flags +// and device specific data +func ParseOptions(options string) (int, string) { + var ( + flag int + data []string + ) + + for _, o := range strings.Split(options, ",") { + // If the option does not exist in the flags table or the flag + // is not supported on the platform, + // then it is a data value for a specific fs type + if f, exists := flags[o]; exists && f.flag != 0 { + if f.clear { + flag &= ^f.flag + } else { + flag |= f.flag + } + } else { + data = append(data, o) + } + } + return flag, strings.Join(data, ",") +} + +// ParseTmpfsOptions parse fstab type mount options into flags and data +func ParseTmpfsOptions(options string) (int, string, error) { + flags, data := ParseOptions(options) + for _, o := range strings.Split(data, ",") { + opt := strings.SplitN(o, "=", 2) + if !validFlags[opt[0]] { + return 0, "", fmt.Errorf("Invalid tmpfs option %q", opt) + } + } + return flags, data, nil +} diff --git a/vendor/github.com/containers/storage/pkg/mount/flags_freebsd.go b/vendor/github.com/containers/storage/pkg/mount/flags_freebsd.go new file mode 100644 index 0000000..5f76f33 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mount/flags_freebsd.go @@ -0,0 +1,49 @@ +// +build freebsd,cgo + +package mount + +/* +#include +*/ +import "C" + +const ( + // RDONLY will mount the filesystem as read-only. + RDONLY = C.MNT_RDONLY + + // NOSUID will not allow set-user-identifier or set-group-identifier bits to + // take effect. + NOSUID = C.MNT_NOSUID + + // NOEXEC will not allow execution of any binaries on the mounted file system. + NOEXEC = C.MNT_NOEXEC + + // SYNCHRONOUS will allow any I/O to the file system to be done synchronously. + SYNCHRONOUS = C.MNT_SYNCHRONOUS + + // NOATIME will not update the file access time when reading from a file. + NOATIME = C.MNT_NOATIME +) + +// These flags are unsupported. +const ( + BIND = 0 + DIRSYNC = 0 + MANDLOCK = 0 + NODEV = 0 + NODIRATIME = 0 + UNBINDABLE = 0 + RUNBINDABLE = 0 + PRIVATE = 0 + RPRIVATE = 0 + SHARED = 0 + RSHARED = 0 + SLAVE = 0 + RSLAVE = 0 + RBIND = 0 + RELATIVE = 0 + RELATIME = 0 + REMOUNT = 0 + STRICTATIME = 0 + mntDetach = 0 +) diff --git a/vendor/github.com/containers/storage/pkg/mount/flags_linux.go b/vendor/github.com/containers/storage/pkg/mount/flags_linux.go new file mode 100644 index 0000000..0425d0d --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mount/flags_linux.go @@ -0,0 +1,87 @@ +package mount + +import ( + "golang.org/x/sys/unix" +) + +const ( + // RDONLY will mount the file system read-only. + RDONLY = unix.MS_RDONLY + + // NOSUID will not allow set-user-identifier or set-group-identifier bits to + // take effect. + NOSUID = unix.MS_NOSUID + + // NODEV will not interpret character or block special devices on the file + // system. + NODEV = unix.MS_NODEV + + // NOEXEC will not allow execution of any binaries on the mounted file system. + NOEXEC = unix.MS_NOEXEC + + // SYNCHRONOUS will allow I/O to the file system to be done synchronously. 
+ SYNCHRONOUS = unix.MS_SYNCHRONOUS + + // DIRSYNC will force all directory updates within the file system to be done + // synchronously. This affects the following system calls: create, link, + // unlink, symlink, mkdir, rmdir, mknod and rename. + DIRSYNC = unix.MS_DIRSYNC + + // REMOUNT will attempt to remount an already-mounted file system. This is + // commonly used to change the mount flags for a file system, especially to + // make a readonly file system writeable. It does not change device or mount + // point. + REMOUNT = unix.MS_REMOUNT + + // MANDLOCK will force mandatory locks on a filesystem. + MANDLOCK = unix.MS_MANDLOCK + + // NOATIME will not update the file access time when reading from a file. + NOATIME = unix.MS_NOATIME + + // NODIRATIME will not update the directory access time. + NODIRATIME = unix.MS_NODIRATIME + + // BIND remounts a subtree somewhere else. + BIND = unix.MS_BIND + + // RBIND remounts a subtree and all possible submounts somewhere else. + RBIND = unix.MS_BIND | unix.MS_REC + + // UNBINDABLE creates a mount which cannot be cloned through a bind operation. + UNBINDABLE = unix.MS_UNBINDABLE + + // RUNBINDABLE marks the entire mount tree as UNBINDABLE. + RUNBINDABLE = unix.MS_UNBINDABLE | unix.MS_REC + + // PRIVATE creates a mount which carries no propagation abilities. + PRIVATE = unix.MS_PRIVATE + + // RPRIVATE marks the entire mount tree as PRIVATE. + RPRIVATE = unix.MS_PRIVATE | unix.MS_REC + + // SLAVE creates a mount which receives propagation from its master, but not + // vice versa. + SLAVE = unix.MS_SLAVE + + // RSLAVE marks the entire mount tree as SLAVE. + RSLAVE = unix.MS_SLAVE | unix.MS_REC + + // SHARED creates a mount which provides the ability to create mirrors of + // that mount such that mounts and unmounts within any of the mirrors + // propagate to the other mirrors. + SHARED = unix.MS_SHARED + + // RSHARED marks the entire mount tree as SHARED. + RSHARED = unix.MS_SHARED | unix.MS_REC + + // RELATIME updates inode access times relative to modify or change time. + RELATIME = unix.MS_RELATIME + + // STRICTATIME allows to explicitly request full atime updates. This makes + // it possible for the kernel to default to relatime or noatime but still + // allow userspace to override it. + STRICTATIME = unix.MS_STRICTATIME + + mntDetach = unix.MNT_DETACH +) diff --git a/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go b/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go new file mode 100644 index 0000000..9ed741e --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go @@ -0,0 +1,31 @@ +// +build !linux,!freebsd freebsd,!cgo solaris,!cgo + +package mount + +// These flags are unsupported. +const ( + BIND = 0 + DIRSYNC = 0 + MANDLOCK = 0 + NOATIME = 0 + NODEV = 0 + NODIRATIME = 0 + NOEXEC = 0 + NOSUID = 0 + UNBINDABLE = 0 + RUNBINDABLE = 0 + PRIVATE = 0 + RPRIVATE = 0 + SHARED = 0 + RSHARED = 0 + SLAVE = 0 + RSLAVE = 0 + RBIND = 0 + RELATIME = 0 + RELATIVE = 0 + REMOUNT = 0 + STRICTATIME = 0 + SYNCHRONOUS = 0 + RDONLY = 0 + mntDetach = 0 +) diff --git a/vendor/github.com/containers/storage/pkg/mount/mount.go b/vendor/github.com/containers/storage/pkg/mount/mount.go new file mode 100644 index 0000000..7197448 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mount/mount.go @@ -0,0 +1,106 @@ +package mount + +import ( + "sort" + "strings" + "time" + + "github.com/containers/storage/pkg/fileutils" +) + +// GetMounts retrieves a list of mounts for the current running process. 
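With the flag tables in place, `ParseOptions` (defined in flags.go earlier in this diff) splits an fstab-style option string into mount(2) bits plus leftover filesystem-specific data. For example:

```go
package main

import (
	"fmt"

	"github.com/containers/storage/pkg/mount"
)

func main() {
	// Options known to the flag table become bits; everything else is passed
	// through as filesystem-specific data.
	flag, data := mount.ParseOptions("noatime,ro,size=64m,mode=0700")
	fmt.Printf("flags=%#x data=%q\n", flag, data)

	fmt.Println(flag&mount.RDONLY != 0)  // true: "ro" sets MS_RDONLY
	fmt.Println(flag&mount.NOATIME != 0) // true
	// data == "size=64m,mode=0700"
}
```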
+func GetMounts() ([]*Info, error) {
+	return parseMountTable()
+}
+
+// Mounted determines if a specified mountpoint has been mounted.
+// On Linux it looks at /proc/self/mountinfo and on Solaris at mnttab.
+func Mounted(mountpoint string) (bool, error) {
+	entries, err := parseMountTable()
+	if err != nil {
+		return false, err
+	}
+
+	mountpoint, err = fileutils.ReadSymlinkedDirectory(mountpoint)
+	if err != nil {
+		return false, err
+	}
+	// Search the table for the mountpoint
+	for _, e := range entries {
+		if e.Mountpoint == mountpoint {
+			return true, nil
+		}
+	}
+	return false, nil
+}
+
+// Mount will mount a filesystem according to the specified configuration, on
+// the condition that the target path is *not* already mounted. Options must be
+// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See
+// flags.go for supported option flags.
+func Mount(device, target, mType, options string) error {
+	flag, _ := ParseOptions(options)
+	if flag&REMOUNT != REMOUNT {
+		if mounted, err := Mounted(target); err != nil || mounted {
+			return err
+		}
+	}
+	return ForceMount(device, target, mType, options)
+}
+
+// ForceMount will mount a filesystem according to the specified configuration,
+// *regardless* of whether the target path is already mounted. Options must be
+// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See
+// flags.go for supported option flags.
+func ForceMount(device, target, mType, options string) error {
+	flag, data := ParseOptions(options)
+	return mount(device, target, mType, uintptr(flag), data)
+}
+
+// Unmount lazily unmounts a filesystem on supported platforms, otherwise
+// does a normal unmount.
+func Unmount(target string) error {
+	if mounted, err := Mounted(target); err != nil || !mounted {
+		return err
+	}
+	return ForceUnmount(target)
+}
+
+// RecursiveUnmount unmounts the target and all mounts underneath, starting with
+// the deepest mount first.
+func RecursiveUnmount(target string) error {
+	mounts, err := GetMounts()
+	if err != nil {
+		return err
+	}
+
+	// Make the deepest mount be first
+	sort.Sort(sort.Reverse(byMountpoint(mounts)))
+
+	for i, m := range mounts {
+		if !strings.HasPrefix(m.Mountpoint, target) {
+			continue
+		}
+		if err := Unmount(m.Mountpoint); err != nil && i == len(mounts)-1 {
+			if mounted, err := Mounted(m.Mountpoint); err != nil || mounted {
+				return err
+			}
+			// Ignore errors for submounts and continue trying to unmount others
+			// The final unmount should fail if there are any submounts remaining
+		}
+	}
+	return nil
+}
+
+// ForceUnmount will force an unmount of the target filesystem, regardless if
+// it is mounted or not. 
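A usage sketch of the high-level entry points, assuming Linux, CAP_SYS_ADMIN, and illustrative `/src` and `/dst` directories that already exist:

```go
// +build linux

package main

import (
	"log"

	"github.com/containers/storage/pkg/mount"
)

func main() {
	// Bind-mount /src onto /dst. Mount is a no-op if /dst is already a
	// mountpoint, per the Mounted check above.
	if err := mount.Mount("/src", "/dst", "none", "bind,rw"); err != nil {
		log.Fatal(err)
	}
	defer func() {
		if err := mount.Unmount("/dst"); err != nil {
			log.Fatal(err)
		}
	}()

	mounted, err := mount.Mounted("/dst")
	if err != nil {
		log.Fatal(err)
	}
	log.Println("mounted:", mounted) // true while the bind mount is active
}
```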
+func ForceUnmount(target string) (err error) { + // Simple retry logic for unmount + for i := 0; i < 10; i++ { + if err = unmount(target, 0); err == nil { + return nil + } + time.Sleep(100 * time.Millisecond) + } + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go b/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go new file mode 100644 index 0000000..814896c --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go @@ -0,0 +1,60 @@ +package mount + +/* +#include +#include +#include +#include +#include +#include +*/ +import "C" + +import ( + "fmt" + "strings" + "unsafe" + + "golang.org/x/sys/unix" +) + +func allocateIOVecs(options []string) []C.struct_iovec { + out := make([]C.struct_iovec, len(options)) + for i, option := range options { + out[i].iov_base = unsafe.Pointer(C.CString(option)) + out[i].iov_len = C.size_t(len(option) + 1) + } + return out +} + +func mount(device, target, mType string, flag uintptr, data string) error { + isNullFS := false + + xs := strings.Split(data, ",") + for _, x := range xs { + if x == "bind" { + isNullFS = true + } + } + + options := []string{"fspath", target} + if isNullFS { + options = append(options, "fstype", "nullfs", "target", device) + } else { + options = append(options, "fstype", mType, "from", device) + } + rawOptions := allocateIOVecs(options) + for _, rawOption := range rawOptions { + defer C.free(rawOption.iov_base) + } + + if errno := C.nmount(&rawOptions[0], C.uint(len(options)), C.int(flag)); errno != 0 { + reason := C.GoString(C.strerror(*C.__error())) + return fmt.Errorf("Failed to call nmount: %s", reason) + } + return nil +} + +func unmount(target string, flag int) error { + return unix.Unmount(target, flag) +} diff --git a/vendor/github.com/containers/storage/pkg/mount/mounter_linux.go b/vendor/github.com/containers/storage/pkg/mount/mounter_linux.go new file mode 100644 index 0000000..39c36d4 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mount/mounter_linux.go @@ -0,0 +1,57 @@ +package mount + +import ( + "golang.org/x/sys/unix" +) + +const ( + // ptypes is the set propagation types. + ptypes = unix.MS_SHARED | unix.MS_PRIVATE | unix.MS_SLAVE | unix.MS_UNBINDABLE + + // pflags is the full set valid flags for a change propagation call. + pflags = ptypes | unix.MS_REC | unix.MS_SILENT + + // broflags is the combination of bind and read only + broflags = unix.MS_BIND | unix.MS_RDONLY +) + +// isremount returns true if either device name or flags identify a remount request, false otherwise. +func isremount(device string, flags uintptr) bool { + switch { + // We treat device "" and "none" as a remount request to provide compatibility with + // requests that don't explicitly set MS_REMOUNT such as those manipulating bind mounts. + case flags&unix.MS_REMOUNT != 0, device == "", device == "none": + return true + default: + return false + } +} + +func mount(device, target, mType string, flags uintptr, data string) error { + oflags := flags &^ ptypes + if !isremount(device, flags) || data != "" { + // Initial call applying all non-propagation flags for mount + // or remount with changed data + if err := unix.Mount(device, target, mType, oflags, data); err != nil { + return err + } + } + + if flags&ptypes != 0 { + // Change the propagation type. + if err := unix.Mount("", target, "", flags&pflags, ""); err != nil { + return err + } + } + + if oflags&broflags == broflags { + // Remount the bind to apply read only. 
+ return unix.Mount("", target, "", oflags|unix.MS_REMOUNT, "") + } + + return nil +} + +func unmount(target string, flag int) error { + return unix.Unmount(target, flag) +} diff --git a/vendor/github.com/containers/storage/pkg/mount/mounter_solaris.go b/vendor/github.com/containers/storage/pkg/mount/mounter_solaris.go new file mode 100644 index 0000000..48b8677 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mount/mounter_solaris.go @@ -0,0 +1,34 @@ +// +build solaris,cgo + +package mount + +import ( + "unsafe" + + "golang.org/x/sys/unix" +) + +// #include +// #include +// #include +// int Mount(const char *spec, const char *dir, int mflag, +// char *fstype, char *dataptr, int datalen, char *optptr, int optlen) { +// return mount(spec, dir, mflag, fstype, dataptr, datalen, optptr, optlen); +// } +import "C" + +func mount(device, target, mType string, flag uintptr, data string) error { + spec := C.CString(device) + dir := C.CString(target) + fstype := C.CString(mType) + _, err := C.Mount(spec, dir, C.int(flag), fstype, nil, 0, nil, 0) + C.free(unsafe.Pointer(spec)) + C.free(unsafe.Pointer(dir)) + C.free(unsafe.Pointer(fstype)) + return err +} + +func unmount(target string, flag int) error { + err := unix.Unmount(target, flag) + return err +} diff --git a/vendor/github.com/containers/storage/pkg/mount/mounter_unsupported.go b/vendor/github.com/containers/storage/pkg/mount/mounter_unsupported.go new file mode 100644 index 0000000..a2a3bb4 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mount/mounter_unsupported.go @@ -0,0 +1,11 @@ +// +build !linux,!freebsd,!solaris freebsd,!cgo solaris,!cgo + +package mount + +func mount(device, target, mType string, flag uintptr, data string) error { + panic("Not implemented") +} + +func unmount(target string, flag int) error { + panic("Not implemented") +} diff --git a/vendor/github.com/containers/storage/pkg/mount/mountinfo.go b/vendor/github.com/containers/storage/pkg/mount/mountinfo.go new file mode 100644 index 0000000..ff4cc1d --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mount/mountinfo.go @@ -0,0 +1,54 @@ +package mount + +// Info reveals information about a particular mounted filesystem. This +// struct is populated from the content in the /proc//mountinfo file. +type Info struct { + // ID is a unique identifier of the mount (may be reused after umount). + ID int + + // Parent indicates the ID of the mount parent (or of self for the top of the + // mount tree). + Parent int + + // Major indicates one half of the device ID which identifies the device class. + Major int + + // Minor indicates one half of the device ID which identifies a specific + // instance of device. + Minor int + + // Root of the mount within the filesystem. + Root string + + // Mountpoint indicates the mount point relative to the process's root. + Mountpoint string + + // Opts represents mount-specific options. + Opts string + + // Optional represents optional fields. + Optional string + + // Fstype indicates the type of filesystem, such as EXT3. + Fstype string + + // Source indicates filesystem specific information or "none". + Source string + + // VfsOpts represents per super block options. 
+ VfsOpts string +} + +type byMountpoint []*Info + +func (by byMountpoint) Len() int { + return len(by) +} + +func (by byMountpoint) Less(i, j int) bool { + return by[i].Mountpoint < by[j].Mountpoint +} + +func (by byMountpoint) Swap(i, j int) { + by[i], by[j] = by[j], by[i] +} diff --git a/vendor/github.com/containers/storage/pkg/mount/mountinfo_freebsd.go b/vendor/github.com/containers/storage/pkg/mount/mountinfo_freebsd.go new file mode 100644 index 0000000..4f32edc --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mount/mountinfo_freebsd.go @@ -0,0 +1,41 @@ +package mount + +/* +#include +#include +#include +*/ +import "C" + +import ( + "fmt" + "reflect" + "unsafe" +) + +// Parse /proc/self/mountinfo because comparing Dev and ino does not work from +// bind mounts. +func parseMountTable() ([]*Info, error) { + var rawEntries *C.struct_statfs + + count := int(C.getmntinfo(&rawEntries, C.MNT_WAIT)) + if count == 0 { + return nil, fmt.Errorf("Failed to call getmntinfo") + } + + var entries []C.struct_statfs + header := (*reflect.SliceHeader)(unsafe.Pointer(&entries)) + header.Cap = count + header.Len = count + header.Data = uintptr(unsafe.Pointer(rawEntries)) + + var out []*Info + for _, entry := range entries { + var mountinfo Info + mountinfo.Mountpoint = C.GoString(&entry.f_mntonname[0]) + mountinfo.Source = C.GoString(&entry.f_mntfromname[0]) + mountinfo.Fstype = C.GoString(&entry.f_fstypename[0]) + out = append(out, &mountinfo) + } + return out, nil +} diff --git a/vendor/github.com/containers/storage/pkg/mount/mountinfo_linux.go b/vendor/github.com/containers/storage/pkg/mount/mountinfo_linux.go new file mode 100644 index 0000000..be69fee --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mount/mountinfo_linux.go @@ -0,0 +1,95 @@ +// +build linux + +package mount + +import ( + "bufio" + "fmt" + "io" + "os" + "strings" +) + +const ( + /* 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue + (1)(2)(3) (4) (5) (6) (7) (8) (9) (10) (11) + + (1) mount ID: unique identifier of the mount (may be reused after umount) + (2) parent ID: ID of parent (or of self for the top of the mount tree) + (3) major:minor: value of st_dev for files on filesystem + (4) root: root of the mount within the filesystem + (5) mount point: mount point relative to the process's root + (6) mount options: per mount options + (7) optional fields: zero or more fields of the form "tag[:value]" + (8) separator: marks the end of the optional fields + (9) filesystem type: name of filesystem of the form "type[.subtype]" + (10) mount source: filesystem specific information or "none" + (11) super options: per super block options*/ + mountinfoFormat = "%d %d %d:%d %s %s %s %s" +) + +// Parse /proc/self/mountinfo because comparing Dev and ino does not work from +// bind mounts +func parseMountTable() ([]*Info, error) { + f, err := os.Open("/proc/self/mountinfo") + if err != nil { + return nil, err + } + defer f.Close() + + return parseInfoFile(f) +} + +func parseInfoFile(r io.Reader) ([]*Info, error) { + var ( + s = bufio.NewScanner(r) + out = []*Info{} + ) + + for s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + + var ( + p = &Info{} + text = s.Text() + optionalFields string + ) + + if _, err := fmt.Sscanf(text, mountinfoFormat, + &p.ID, &p.Parent, &p.Major, &p.Minor, + &p.Root, &p.Mountpoint, &p.Opts, &optionalFields); err != nil { + return nil, fmt.Errorf("Scanning '%s' failed: %s", text, err) + } + // Safe as mountinfo encodes mountpoints with spaces as 
\040. + index := strings.Index(text, " - ") + postSeparatorFields := strings.Fields(text[index+3:]) + if len(postSeparatorFields) < 3 { + return nil, fmt.Errorf("Error found less than 3 fields post '-' in %q", text) + } + + if optionalFields != "-" { + p.Optional = optionalFields + } + + p.Fstype = postSeparatorFields[0] + p.Source = postSeparatorFields[1] + p.VfsOpts = strings.Join(postSeparatorFields[2:], " ") + out = append(out, p) + } + return out, nil +} + +// PidMountInfo collects the mounts for a specific process ID. If the process +// ID is unknown, it is better to use `GetMounts` which will inspect +// "/proc/self/mountinfo" instead. +func PidMountInfo(pid int) ([]*Info, error) { + f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid)) + if err != nil { + return nil, err + } + defer f.Close() + + return parseInfoFile(f) +} diff --git a/vendor/github.com/containers/storage/pkg/mount/mountinfo_solaris.go b/vendor/github.com/containers/storage/pkg/mount/mountinfo_solaris.go new file mode 100644 index 0000000..ad9ab57 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mount/mountinfo_solaris.go @@ -0,0 +1,37 @@ +// +build solaris,cgo + +package mount + +/* +#include +#include +*/ +import "C" + +import ( + "fmt" +) + +func parseMountTable() ([]*Info, error) { + mnttab := C.fopen(C.CString(C.MNTTAB), C.CString("r")) + if mnttab == nil { + return nil, fmt.Errorf("Failed to open %s", C.MNTTAB) + } + + var out []*Info + var mp C.struct_mnttab + + ret := C.getmntent(mnttab, &mp) + for ret == 0 { + var mountinfo Info + mountinfo.Mountpoint = C.GoString(mp.mnt_mountp) + mountinfo.Source = C.GoString(mp.mnt_special) + mountinfo.Fstype = C.GoString(mp.mnt_fstype) + mountinfo.Opts = C.GoString(mp.mnt_mntopts) + out = append(out, &mountinfo) + ret = C.getmntent(mnttab, &mp) + } + + C.fclose(mnttab) + return out, nil +} diff --git a/vendor/github.com/containers/storage/pkg/mount/mountinfo_unsupported.go b/vendor/github.com/containers/storage/pkg/mount/mountinfo_unsupported.go new file mode 100644 index 0000000..7fbcf19 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mount/mountinfo_unsupported.go @@ -0,0 +1,12 @@ +// +build !windows,!linux,!freebsd,!solaris freebsd,!cgo solaris,!cgo + +package mount + +import ( + "fmt" + "runtime" +) + +func parseMountTable() ([]*Info, error) { + return nil, fmt.Errorf("mount.parseMountTable is not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) +} diff --git a/vendor/github.com/containers/storage/pkg/mount/mountinfo_windows.go b/vendor/github.com/containers/storage/pkg/mount/mountinfo_windows.go new file mode 100644 index 0000000..dab8a37 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mount/mountinfo_windows.go @@ -0,0 +1,6 @@ +package mount + +func parseMountTable() ([]*Info, error) { + // Do NOT return an error! + return nil, nil +} diff --git a/vendor/github.com/containers/storage/pkg/mount/sharedsubtree_linux.go b/vendor/github.com/containers/storage/pkg/mount/sharedsubtree_linux.go new file mode 100644 index 0000000..8ceec84 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mount/sharedsubtree_linux.go @@ -0,0 +1,69 @@ +// +build linux + +package mount + +// MakeShared ensures a mounted filesystem has the SHARED mount option enabled. +// See the supported options in flags.go for further reference. +func MakeShared(mountPoint string) error { + return ensureMountedAs(mountPoint, "shared") +} + +// MakeRShared ensures a mounted filesystem has the RSHARED mount option enabled. 
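To make the mountinfo format string concrete, here is the same `fmt.Sscanf` pattern applied to the sample line from the comment block, including the ` - ` separator handling:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// One line in the documented /proc/self/mountinfo format.
	line := `36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue`

	var (
		id, parent, major, minor         int
		root, mountpoint, opts, optional string
	)
	if _, err := fmt.Sscanf(line, "%d %d %d:%d %s %s %s %s",
		&id, &parent, &major, &minor, &root, &mountpoint, &opts, &optional); err != nil {
		panic(err)
	}

	// Everything after " - " is fstype, source, and super-block options.
	post := strings.Fields(line[strings.Index(line, " - ")+3:])
	fmt.Println(mountpoint, post[0], post[1]) // /mnt2 ext3 /dev/root
}
```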
+// See the supported options in flags.go for further reference. +func MakeRShared(mountPoint string) error { + return ensureMountedAs(mountPoint, "rshared") +} + +// MakePrivate ensures a mounted filesystem has the PRIVATE mount option enabled. +// See the supported options in flags.go for further reference. +func MakePrivate(mountPoint string) error { + return ensureMountedAs(mountPoint, "private") +} + +// MakeRPrivate ensures a mounted filesystem has the RPRIVATE mount option +// enabled. See the supported options in flags.go for further reference. +func MakeRPrivate(mountPoint string) error { + return ensureMountedAs(mountPoint, "rprivate") +} + +// MakeSlave ensures a mounted filesystem has the SLAVE mount option enabled. +// See the supported options in flags.go for further reference. +func MakeSlave(mountPoint string) error { + return ensureMountedAs(mountPoint, "slave") +} + +// MakeRSlave ensures a mounted filesystem has the RSLAVE mount option enabled. +// See the supported options in flags.go for further reference. +func MakeRSlave(mountPoint string) error { + return ensureMountedAs(mountPoint, "rslave") +} + +// MakeUnbindable ensures a mounted filesystem has the UNBINDABLE mount option +// enabled. See the supported options in flags.go for further reference. +func MakeUnbindable(mountPoint string) error { + return ensureMountedAs(mountPoint, "unbindable") +} + +// MakeRUnbindable ensures a mounted filesystem has the RUNBINDABLE mount +// option enabled. See the supported options in flags.go for further reference. +func MakeRUnbindable(mountPoint string) error { + return ensureMountedAs(mountPoint, "runbindable") +} + +func ensureMountedAs(mountPoint, options string) error { + mounted, err := Mounted(mountPoint) + if err != nil { + return err + } + + if !mounted { + if err := Mount(mountPoint, mountPoint, "none", "bind,rw"); err != nil { + return err + } + } + if _, err = Mounted(mountPoint); err != nil { + return err + } + + return ForceMount("", mountPoint, "none", options) +} diff --git a/vendor/github.com/containers/storage/pkg/mount/sharedsubtree_solaris.go b/vendor/github.com/containers/storage/pkg/mount/sharedsubtree_solaris.go new file mode 100644 index 0000000..09f6b03 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mount/sharedsubtree_solaris.go @@ -0,0 +1,58 @@ +// +build solaris + +package mount + +// MakeShared ensures a mounted filesystem has the SHARED mount option enabled. +// See the supported options in flags.go for further reference. +func MakeShared(mountPoint string) error { + return ensureMountedAs(mountPoint, "shared") +} + +// MakeRShared ensures a mounted filesystem has the RSHARED mount option enabled. +// See the supported options in flags.go for further reference. +func MakeRShared(mountPoint string) error { + return ensureMountedAs(mountPoint, "rshared") +} + +// MakePrivate ensures a mounted filesystem has the PRIVATE mount option enabled. +// See the supported options in flags.go for further reference. +func MakePrivate(mountPoint string) error { + return ensureMountedAs(mountPoint, "private") +} + +// MakeRPrivate ensures a mounted filesystem has the RPRIVATE mount option +// enabled. See the supported options in flags.go for further reference. +func MakeRPrivate(mountPoint string) error { + return ensureMountedAs(mountPoint, "rprivate") +} + +// MakeSlave ensures a mounted filesystem has the SLAVE mount option enabled. +// See the supported options in flags.go for further reference. 
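These helpers are typically used to adjust mount propagation before creating container mounts, so that mounts made under a driver's root do not leak into the host namespace. A brief sketch with an illustrative path:

```go
// +build linux

package main

import (
	"log"

	"github.com/containers/storage/pkg/mount"
)

func main() {
	// Re-mark the storage root rprivate (or rslave) so mounts created
	// beneath it stay out of the host's mount tree.
	if err := mount.MakeRPrivate("/var/lib/mystorage"); err != nil {
		log.Fatal(err)
	}
}
```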
+func MakeSlave(mountPoint string) error { + return ensureMountedAs(mountPoint, "slave") +} + +// MakeRSlave ensures a mounted filesystem has the RSLAVE mount option enabled. +// See the supported options in flags.go for further reference. +func MakeRSlave(mountPoint string) error { + return ensureMountedAs(mountPoint, "rslave") +} + +// MakeUnbindable ensures a mounted filesystem has the UNBINDABLE mount option +// enabled. See the supported options in flags.go for further reference. +func MakeUnbindable(mountPoint string) error { + return ensureMountedAs(mountPoint, "unbindable") +} + +// MakeRUnbindable ensures a mounted filesystem has the RUNBINDABLE mount +// option enabled. See the supported options in flags.go for further reference. +func MakeRUnbindable(mountPoint string) error { + return ensureMountedAs(mountPoint, "runbindable") +} + +func ensureMountedAs(mountPoint, options string) error { + // TODO: Solaris does not support bind mounts. + // Evaluate lofs and also look at the relevant + // mount flags to be supported. + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/ostree/no_ostree.go b/vendor/github.com/containers/storage/pkg/ostree/no_ostree.go new file mode 100644 index 0000000..0a8e7d6 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/ostree/no_ostree.go @@ -0,0 +1,19 @@ +// +build !ostree + +package ostree + +func OstreeSupport() bool { + return false +} + +func DeleteOSTree(repoLocation, id string) error { + return nil +} + +func CreateOSTreeRepository(repoLocation string, rootUID int, rootGID int) error { + return nil +} + +func ConvertToOSTree(repoLocation, root, id string) error { + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/ostree/ostree.go b/vendor/github.com/containers/storage/pkg/ostree/ostree.go new file mode 100644 index 0000000..e9b57a0 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/ostree/ostree.go @@ -0,0 +1,198 @@ +// +build ostree + +package ostree + +import ( + "fmt" + "golang.org/x/sys/unix" + "os" + "path/filepath" + "runtime" + "syscall" + "time" + "unsafe" + + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/system" + glib "github.com/ostreedev/ostree-go/pkg/glibobject" + "github.com/ostreedev/ostree-go/pkg/otbuiltin" + "github.com/pkg/errors" +) + +// #cgo pkg-config: glib-2.0 gobject-2.0 ostree-1 +// #include +// #include +// #include +// #include +// #include +// #include +import "C" + +func OstreeSupport() bool { + return true +} + +func fixFiles(dir string, usermode bool) (bool, []string, error) { + var SkipOstree = errors.New("skip ostree deduplication") + + var whiteouts []string + + err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { + if info.Mode()&(os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 { + if !usermode { + stat, ok := info.Sys().(*syscall.Stat_t) + if !ok { + return errors.New("not syscall.Stat_t") + } + + if stat.Rdev == 0 && (stat.Mode&unix.S_IFCHR) != 0 { + whiteouts = append(whiteouts, path) + return nil + } + } + // Skip the ostree deduplication if we encounter a file type that + // ostree does not manage. 
+ return SkipOstree
+ }
+ if info.IsDir() {
+ if usermode {
+ if err := os.Chmod(path, info.Mode()|0700); err != nil {
+ return err
+ }
+ }
+ } else if usermode && (info.Mode().IsRegular()) {
+ if err := os.Chmod(path, info.Mode()|0600); err != nil {
+ return err
+ }
+ }
+ return nil
+ })
+ if err == SkipOstree {
+ return true, nil, nil
+ }
+ if err != nil {
+ return false, nil, err
+ }
+ return false, whiteouts, nil
+}
+
+// ConvertToOSTree prepares the filesystem for the OSTree driver and copies the directory for the given id under the parent.
+func ConvertToOSTree(repoLocation, root, id string) error {
+ runtime.LockOSThread()
+ defer runtime.UnlockOSThread()
+ repo, err := otbuiltin.OpenRepo(repoLocation)
+ if err != nil {
+ return errors.Wrap(err, "could not open the OSTree repository")
+ }
+
+ skip, whiteouts, err := fixFiles(root, os.Getuid() != 0)
+ if err != nil {
+ return errors.Wrap(err, "could not prepare the OSTree directory")
+ }
+ if skip {
+ return nil
+ }
+
+ if _, err := repo.PrepareTransaction(); err != nil {
+ return errors.Wrap(err, "could not prepare the OSTree transaction")
+ }
+
+ commitOpts := otbuiltin.NewCommitOptions()
+ commitOpts.Timestamp = time.Now()
+ commitOpts.LinkCheckoutSpeedup = true
+ commitOpts.Parent = "0000000000000000000000000000000000000000000000000000000000000000"
+ branch := fmt.Sprintf("containers-storage/%s", id)
+
+ for _, w := range whiteouts {
+ if err := os.Remove(w); err != nil {
+ return errors.Wrap(err, "could not delete whiteout file")
+ }
+ }
+
+ if _, err := repo.Commit(root, branch, commitOpts); err != nil {
+ return errors.Wrap(err, "could not commit the layer")
+ }
+
+ if _, err := repo.CommitTransaction(); err != nil {
+ return errors.Wrap(err, "could not complete the OSTree transaction")
+ }
+
+ if err := system.EnsureRemoveAll(root); err != nil {
+ return errors.Wrap(err, "could not delete layer")
+ }
+
+ checkoutOpts := otbuiltin.NewCheckoutOptions()
+ checkoutOpts.RequireHardlinks = true
+ checkoutOpts.Whiteouts = false
+ if err := otbuiltin.Checkout(repoLocation, root, branch, checkoutOpts); err != nil {
+ return errors.Wrap(err, "could not checkout from OSTree")
+ }
+
+ for _, w := range whiteouts {
+ if err := unix.Mknod(w, unix.S_IFCHR, 0); err != nil {
+ return errors.Wrap(err, "could not recreate whiteout file")
+ }
+ }
+ return nil
+}
+
+func CreateOSTreeRepository(repoLocation string, rootUID int, rootGID int) error {
+ runtime.LockOSThread()
+ defer runtime.UnlockOSThread()
+
+ _, err := os.Stat(repoLocation)
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ } else if err != nil {
+ if err := idtools.MkdirAllAs(repoLocation, 0700, rootUID, rootGID); err != nil {
+ return errors.Wrap(err, "could not create OSTree repository directory")
+ }
+
+ if _, err := otbuiltin.Init(repoLocation, otbuiltin.NewInitOptions()); err != nil {
+ return errors.Wrap(err, "could not create OSTree repository")
+ }
+ }
+ return nil
+}
+
+func openRepo(path string) (*C.struct_OstreeRepo, error) {
+ var cerr *C.GError
+ cpath := C.CString(path)
+ defer C.free(unsafe.Pointer(cpath))
+ pathc := C.g_file_new_for_path(cpath)
+ defer C.g_object_unref(C.gpointer(pathc))
+ repo := C.ostree_repo_new(pathc)
+ r := glib.GoBool(glib.GBoolean(C.ostree_repo_open(repo, nil, &cerr)))
+ if !r {
+ C.g_object_unref(C.gpointer(repo))
+ return nil, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr)))
+ }
+ return repo, nil
+}
+
+func DeleteOSTree(repoLocation, id string) error {
+ runtime.LockOSThread()
+ defer runtime.UnlockOSThread()
+
+ repo, err := openRepo(repoLocation)
+ if err != nil {
+ return err
+ }
+ defer C.g_object_unref(C.gpointer(repo))
+
+ branch := fmt.Sprintf("containers-storage/%s", id)
+
+ cbranch := C.CString(branch)
+ defer C.free(unsafe.Pointer(cbranch))
+
+ var cerr *C.GError
+ r := glib.GoBool(glib.GBoolean(C.ostree_repo_set_ref_immediate(repo, nil, cbranch, nil, nil, &cerr)))
+ if !r {
+ return glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr)))
+ }
+ return nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel.go
new file mode 100644
index 0000000..7738fc7
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel.go
@@ -0,0 +1,74 @@
+// +build !windows
+
+// Package kernel provides helper functions to get, parse and compare kernel
+// versions for different platforms.
+package kernel
+
+import (
+ "errors"
+ "fmt"
+)
+
+// VersionInfo holds information about the kernel.
+type VersionInfo struct {
+ Kernel int // Version of the kernel (e.g. 4.1.2-generic -> 4)
+ Major int // Major part of the kernel version (e.g. 4.1.2-generic -> 1)
+ Minor int // Minor part of the kernel version (e.g. 4.1.2-generic -> 2)
+ Flavor string // Flavor of the kernel version (e.g. 4.1.2-generic -> generic)
+}
+
+func (k *VersionInfo) String() string {
+ return fmt.Sprintf("%d.%d.%d%s", k.Kernel, k.Major, k.Minor, k.Flavor)
+}
+
+// CompareKernelVersion compares two kernel.VersionInfo structs.
+// Returns -1 if a < b, 0 if a == b, 1 if a > b
+func CompareKernelVersion(a, b VersionInfo) int {
+ if a.Kernel < b.Kernel {
+ return -1
+ } else if a.Kernel > b.Kernel {
+ return 1
+ }
+
+ if a.Major < b.Major {
+ return -1
+ } else if a.Major > b.Major {
+ return 1
+ }
+
+ if a.Minor < b.Minor {
+ return -1
+ } else if a.Minor > b.Minor {
+ return 1
+ }
+
+ return 0
+}
+
+// ParseRelease parses a string and creates a VersionInfo based on it.
+func ParseRelease(release string) (*VersionInfo, error) {
+ var (
+ kernel, major, minor, parsed int
+ flavor, partial string
+ )
+
+ // Ignore error from Sscanf to allow an empty flavor. Instead, just
+ // make sure we got all the version numbers.
+ parsed, _ = fmt.Sscanf(release, "%d.%d%s", &kernel, &major, &partial)
+ if parsed < 2 {
+ return nil, errors.New("Can't parse kernel version " + release)
+ }
+
+ // sometimes we have 3.12.25-gentoo, but sometimes we just have 3.12-1-amd64
+ parsed, _ = fmt.Sscanf(partial, ".%d%s", &minor, &flavor)
+ if parsed < 1 {
+ flavor = partial
+ }
+
+ return &VersionInfo{
+ Kernel: kernel,
+ Major: major,
+ Minor: minor,
+ Flavor: flavor,
+ }, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_darwin.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_darwin.go
new file mode 100644
index 0000000..71f205b
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_darwin.go
@@ -0,0 +1,56 @@
+// +build darwin
+
+// Package kernel provides helper functions to get, parse and compare kernel
+// versions for different platforms.
+package kernel
+
+import (
+ "fmt"
+ "os/exec"
+ "strings"
+
+ "github.com/mattn/go-shellwords"
+)
+
+// GetKernelVersion gets the current kernel version.
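+// A minimal usage sketch (assuming the package is imported as "kernel"; the
+// printed value is illustrative):
+//
+//	if v, err := kernel.GetKernelVersion(); err == nil {
+//		fmt.Println(v) // e.g. "15.6.0", via ParseRelease on the Darwin release string
+//	}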
+func GetKernelVersion() (*VersionInfo, error) {
+ release, err := getRelease()
+ if err != nil {
+ return nil, err
+ }
+
+ return ParseRelease(release)
+}
+
+// getRelease uses `system_profiler SPSoftwareDataType` to get OSX kernel version
+func getRelease() (string, error) {
+ cmd := exec.Command("system_profiler", "SPSoftwareDataType")
+ osName, err := cmd.Output()
+ if err != nil {
+ return "", err
+ }
+
+ var release string
+ data := strings.Split(string(osName), "\n")
+ for _, line := range data {
+ if strings.Contains(line, "Kernel Version") {
+ // It has the format like ' Kernel Version: Darwin 14.5.0'
+ content := strings.SplitN(line, ":", 2)
+ if len(content) != 2 {
+ return "", fmt.Errorf("Kernel Version is invalid")
+ }
+
+ prettyNames, err := shellwords.Parse(content[1])
+ if err != nil {
+ return "", fmt.Errorf("Kernel Version is invalid: %s", err.Error())
+ }
+
+ if len(prettyNames) != 2 {
+ return "", fmt.Errorf("Kernel Version needs to be 'Darwin x.x.x'")
+ }
+ release = prettyNames[1]
+ }
+ }
+
+ return release, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_unix.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_unix.go
new file mode 100644
index 0000000..76e1e49
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_unix.go
@@ -0,0 +1,45 @@
+// +build linux freebsd solaris openbsd
+
+// Package kernel provides helper functions to get, parse and compare kernel
+// versions for different platforms.
+package kernel
+
+import (
+ "bytes"
+
+ "github.com/sirupsen/logrus"
+)
+
+// GetKernelVersion gets the current kernel version.
+func GetKernelVersion() (*VersionInfo, error) {
+ uts, err := uname()
+ if err != nil {
+ return nil, err
+ }
+
+ release := make([]byte, len(uts.Release))
+
+ i := 0
+ for _, c := range uts.Release {
+ release[i] = byte(c)
+ i++
+ }
+
+ // Remove the \x00 from the release for Atoi to parse correctly
+ release = release[:bytes.IndexByte(release, 0)]
+
+ return ParseRelease(string(release))
+}
+
+// CheckKernelVersion checks if current kernel is newer than (or equal to)
+// the given version.
+func CheckKernelVersion(k, major, minor int) bool {
+ if v, err := GetKernelVersion(); err != nil {
+ logrus.Warnf("error getting kernel version: %s", err)
+ } else {
+ if CompareKernelVersion(*v, VersionInfo{Kernel: k, Major: major, Minor: minor}) < 0 {
+ return false
+ }
+ }
+ return true
+}
diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go
new file mode 100644
index 0000000..e598672
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go
@@ -0,0 +1,70 @@
+// +build windows
+
+package kernel
+
+import (
+ "fmt"
+ "unsafe"
+
+ "golang.org/x/sys/windows"
+)
+
+// VersionInfo holds information about the kernel.
+type VersionInfo struct {
+ kvi string // Full version string from the registry (e.g. the BuildLabEx value)
+ major int // Major part of the kernel version (e.g. 6.1.7601 -> 6)
+ minor int // Minor part of the kernel version (e.g. 6.1.7601 -> 1)
+ build int // Build number of the kernel version (e.g. 6.1.7601 -> 7601)
+}
+
+func (k *VersionInfo) String() string {
+ return fmt.Sprintf("%d.%d %d (%s)", k.major, k.minor, k.build, k.kvi)
+}
+
+// GetKernelVersion gets the current kernel version.
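+// The result prints via String() as "major.minor build (kvi)"; a hypothetical
+// example would be "10.0 14393 (14393.0.amd64fre...)", where the parenthesized
+// part is the raw BuildLabEx string.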
+func GetKernelVersion() (*VersionInfo, error) {
+
+ var (
+ h windows.Handle
+ dwVersion uint32
+ err error
+ )
+
+ KVI := &VersionInfo{"Unknown", 0, 0, 0}
+
+ if err = windows.RegOpenKeyEx(windows.HKEY_LOCAL_MACHINE,
+ windows.StringToUTF16Ptr(`SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\`),
+ 0,
+ windows.KEY_READ,
+ &h); err != nil {
+ return KVI, err
+ }
+ defer windows.RegCloseKey(h)
+
+ var buf [1 << 10]uint16
+ var typ uint32
+ n := uint32(len(buf) * 2) // api expects array of bytes, not uint16
+
+ if err = windows.RegQueryValueEx(h,
+ windows.StringToUTF16Ptr("BuildLabEx"),
+ nil,
+ &typ,
+ (*byte)(unsafe.Pointer(&buf[0])),
+ &n); err != nil {
+ return KVI, err
+ }
+
+ KVI.kvi = windows.UTF16ToString(buf[:])
+
+ // Important - docker.exe MUST be manifested for this API to return
+ // the correct information.
+ if dwVersion, err = windows.GetVersion(); err != nil {
+ return KVI, err
+ }
+
+ KVI.major = int(dwVersion & 0xFF)
+ KVI.minor = int((dwVersion & 0xFF00) >> 8)
+ KVI.build = int((dwVersion & 0xFFFF0000) >> 16)
+
+ return KVI, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_linux.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_linux.go
new file mode 100644
index 0000000..e913fad
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_linux.go
@@ -0,0 +1,17 @@
+package kernel
+
+import "golang.org/x/sys/unix"
+
+// Utsname represents the system name structure.
+// It is a passthrough for unix.Utsname in order to make it portable with
+// other platforms where it is not available.
+type Utsname unix.Utsname
+
+func uname() (*unix.Utsname, error) {
+ uts := &unix.Utsname{}
+
+ if err := unix.Uname(uts); err != nil {
+ return nil, err
+ }
+ return uts, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_solaris.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_solaris.go
new file mode 100644
index 0000000..49370bd
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_solaris.go
@@ -0,0 +1,14 @@
+package kernel
+
+import (
+ "golang.org/x/sys/unix"
+)
+
+func uname() (*unix.Utsname, error) {
+ uts := &unix.Utsname{}
+
+ if err := unix.Uname(uts); err != nil {
+ return nil, err
+ }
+ return uts, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported.go
new file mode 100644
index 0000000..1da3f23
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported.go
@@ -0,0 +1,18 @@
+// +build !linux,!solaris
+
+package kernel
+
+import (
+ "errors"
+)
+
+// Utsname represents the system name structure.
+// It is defined here to make it portable, as it is available on Linux but
+// not on Windows.
+type Utsname struct {
+ Release [65]byte
+}
+
+func uname() (*Utsname, error) {
+ return nil, errors.New("Kernel version detection is available only on linux")
+}
diff --git a/vendor/github.com/containers/storage/pkg/parsers/parsers.go b/vendor/github.com/containers/storage/pkg/parsers/parsers.go
new file mode 100644
index 0000000..acc8971
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/parsers/parsers.go
@@ -0,0 +1,69 @@
+// Package parsers provides helper functions to parse and validate different
+// types of strings: hosts, unix addresses, tcp addresses, filters, and kernel
+// operating system versions.
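+//
+// A sketch of the behavior of the helpers defined below:
+//
+//	ParseKeyValueOpt("key=value") // -> ("key", "value", nil)
+//	ParseUintList("0,3-4")        // -> map[int]bool{0: true, 3: true, 4: true}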
+package parsers + +import ( + "fmt" + "strconv" + "strings" +) + +// ParseKeyValueOpt parses and validates the specified string as a key/value pair (key=value) +func ParseKeyValueOpt(opt string) (string, string, error) { + parts := strings.SplitN(opt, "=", 2) + if len(parts) != 2 { + return "", "", fmt.Errorf("Unable to parse key/value option: %s", opt) + } + return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil +} + +// ParseUintList parses and validates the specified string as the value +// found in some cgroup file (e.g. `cpuset.cpus`, `cpuset.mems`), which could be +// one of the formats below. Note that duplicates are actually allowed in the +// input string. It returns a `map[int]bool` with available elements from `val` +// set to `true`. +// Supported formats: +// 7 +// 1-6 +// 0,3-4,7,8-10 +// 0-0,0,1-7 +// 03,1-3 <- this is gonna get parsed as [1,2,3] +// 3,2,1 +// 0-2,3,1 +func ParseUintList(val string) (map[int]bool, error) { + if val == "" { + return map[int]bool{}, nil + } + + availableInts := make(map[int]bool) + split := strings.Split(val, ",") + errInvalidFormat := fmt.Errorf("invalid format: %s", val) + + for _, r := range split { + if !strings.Contains(r, "-") { + v, err := strconv.Atoi(r) + if err != nil { + return nil, errInvalidFormat + } + availableInts[v] = true + } else { + split := strings.SplitN(r, "-", 2) + min, err := strconv.Atoi(split[0]) + if err != nil { + return nil, errInvalidFormat + } + max, err := strconv.Atoi(split[1]) + if err != nil { + return nil, errInvalidFormat + } + if max < min { + return nil, errInvalidFormat + } + for i := min; i <= max; i++ { + availableInts[i] = true + } + } + } + return availableInts, nil +} diff --git a/vendor/github.com/containers/storage/pkg/pools/pools.go b/vendor/github.com/containers/storage/pkg/pools/pools.go new file mode 100644 index 0000000..a15e368 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/pools/pools.go @@ -0,0 +1,119 @@ +// Package pools provides a collection of pools which provide various +// data types with buffers. These can be used to lower the number of +// memory allocations and reuse buffers. +// +// New pools should be added to this package to allow them to be +// shared across packages. +// +// Utility functions which operate on pools should be added to this +// package to allow them to be reused. +package pools + +import ( + "bufio" + "io" + "sync" + + "github.com/containers/storage/pkg/ioutils" +) + +var ( + // BufioReader32KPool is a pool which returns bufio.Reader with a 32K buffer. + BufioReader32KPool *BufioReaderPool + // BufioWriter32KPool is a pool which returns bufio.Writer with a 32K buffer. + BufioWriter32KPool *BufioWriterPool +) + +const buffer32K = 32 * 1024 + +// BufioReaderPool is a bufio reader that uses sync.Pool. +type BufioReaderPool struct { + pool *sync.Pool +} + +func init() { + BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K) + BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K) +} + +// newBufioReaderPoolWithSize is unexported because new pools should be +// added here to be shared where required. +func newBufioReaderPoolWithSize(size int) *BufioReaderPool { + pool := &sync.Pool{ + New: func() interface{} { return bufio.NewReaderSize(nil, size) }, + } + return &BufioReaderPool{pool: pool} +} + +// Get returns a bufio.Reader which reads from r. The buffer size is that of the pool. 
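+// A typical pairing (sketch) is a Get followed by a deferred Put, which is the
+// same pattern the Copy helper below uses internally:
+//
+//	buf := pools.BufioReader32KPool.Get(src)
+//	defer pools.BufioReader32KPool.Put(buf)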
+func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader {
+ buf := bufPool.pool.Get().(*bufio.Reader)
+ buf.Reset(r)
+ return buf
+}
+
+// Put puts the bufio.Reader back into the pool.
+func (bufPool *BufioReaderPool) Put(b *bufio.Reader) {
+ b.Reset(nil)
+ bufPool.pool.Put(b)
+}
+
+// Copy is a convenience wrapper which uses a buffer to avoid allocation in io.Copy.
+func Copy(dst io.Writer, src io.Reader) (written int64, err error) {
+ buf := BufioReader32KPool.Get(src)
+ written, err = io.Copy(dst, buf)
+ BufioReader32KPool.Put(buf)
+ return
+}
+
+// NewReadCloserWrapper returns a wrapper which puts the bufio.Reader back
+// into the pool and closes the reader if it's an io.ReadCloser.
+func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser {
+ return ioutils.NewReadCloserWrapper(r, func() error {
+ if readCloser, ok := r.(io.ReadCloser); ok {
+ readCloser.Close()
+ }
+ bufPool.Put(buf)
+ return nil
+ })
+}
+
+// BufioWriterPool is a bufio writer that uses sync.Pool.
+type BufioWriterPool struct {
+ pool *sync.Pool
+}
+
+// newBufioWriterPoolWithSize is unexported because new pools should be
+// added here to be shared where required.
+func newBufioWriterPoolWithSize(size int) *BufioWriterPool {
+ pool := &sync.Pool{
+ New: func() interface{} { return bufio.NewWriterSize(nil, size) },
+ }
+ return &BufioWriterPool{pool: pool}
+}
+
+// Get returns a bufio.Writer which writes to w. The buffer size is that of the pool.
+func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer {
+ buf := bufPool.pool.Get().(*bufio.Writer)
+ buf.Reset(w)
+ return buf
+}
+
+// Put puts the bufio.Writer back into the pool.
+func (bufPool *BufioWriterPool) Put(b *bufio.Writer) {
+ b.Reset(nil)
+ bufPool.pool.Put(b)
+}
+
+// NewWriteCloserWrapper returns a wrapper which puts the bufio.Writer back
+// into the pool and closes the writer if it's an io.WriteCloser.
+func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser {
+ return ioutils.NewWriteCloserWrapper(w, func() error {
+ buf.Flush()
+ if writeCloser, ok := w.(io.WriteCloser); ok {
+ writeCloser.Close()
+ }
+ bufPool.Put(buf)
+ return nil
+ })
+}
diff --git a/vendor/github.com/containers/storage/pkg/promise/promise.go b/vendor/github.com/containers/storage/pkg/promise/promise.go
new file mode 100644
index 0000000..dd52b90
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/promise/promise.go
@@ -0,0 +1,11 @@
+package promise
+
+// Go is a basic promise implementation: it calls a function in a goroutine
+// and returns a channel which will later return the function's return value.
+func Go(f func() error) chan error {
+ ch := make(chan error, 1)
+ go func() {
+ ch <- f()
+ }()
+ return ch
+}
diff --git a/vendor/github.com/containers/storage/pkg/reexec/README.md b/vendor/github.com/containers/storage/pkg/reexec/README.md
new file mode 100644
index 0000000..6658f69
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/reexec/README.md
@@ -0,0 +1,5 @@
+# reexec
+
+The `reexec` package facilitates the busybox-style re-exec of the docker binary that we require
+because of the forking limitations of using Go. Handlers can be registered under a name, and the
+argv[0] of the re-exec'd binary is used to find and execute the matching custom init path.
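+
+A minimal sketch of the pattern (the handler name `childInit` is illustrative,
+not something this package defines):
+
+```go
+package main
+
+import (
+	"os"
+
+	"github.com/containers/storage/pkg/reexec"
+)
+
+func init() {
+	reexec.Register("childInit", func() {
+		os.Exit(0) // runs in the re-exec'd child
+	})
+}
+
+func main() {
+	if reexec.Init() {
+		return // argv[0] matched a registered name; its handler already ran
+	}
+	cmd := reexec.Command("childInit") // argv[0] selects the handler
+	if err := cmd.Run(); err != nil {
+		panic(err)
+	}
+}
+```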
diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_linux.go b/vendor/github.com/containers/storage/pkg/reexec/command_linux.go
new file mode 100644
index 0000000..1ae728a
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/reexec/command_linux.go
@@ -0,0 +1,43 @@
+// +build linux
+
+package reexec
+
+import (
+ "context"
+ "os/exec"
+ "syscall"
+
+ "golang.org/x/sys/unix"
+)
+
+// Self returns the path to the current process's binary.
+// Returns "/proc/self/exe".
+func Self() string {
+ return "/proc/self/exe"
+}
+
+// Command returns *exec.Cmd which has Path as current binary. It also sets
+// SysProcAttr.Pdeathsig to SIGTERM.
+// This will use the in-memory version (/proc/self/exe) of the current binary,
+// it is thus safe to delete or replace the on-disk binary (os.Args[0]).
+func Command(args ...string) *exec.Cmd {
+ cmd := exec.Command(Self())
+ cmd.Args = args
+ cmd.SysProcAttr = &syscall.SysProcAttr{
+ Pdeathsig: unix.SIGTERM,
+ }
+ return cmd
+}
+
+// CommandContext returns *exec.Cmd which has Path as current binary, and also
+// sets SysProcAttr.Pdeathsig to SIGTERM.
+// This will use the in-memory version (/proc/self/exe) of the current binary,
+// it is thus safe to delete or replace the on-disk binary (os.Args[0]).
+func CommandContext(ctx context.Context, args ...string) *exec.Cmd {
+ cmd := exec.CommandContext(ctx, Self())
+ cmd.Args = args
+ cmd.SysProcAttr = &syscall.SysProcAttr{
+ Pdeathsig: unix.SIGTERM,
+ }
+ return cmd
+}
diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_unix.go b/vendor/github.com/containers/storage/pkg/reexec/command_unix.go
new file mode 100644
index 0000000..1ecaa90
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/reexec/command_unix.go
@@ -0,0 +1,30 @@
+// +build freebsd solaris darwin
+
+package reexec
+
+import (
+ "context"
+ "os/exec"
+)
+
+// Self returns the path to the current process's binary.
+// Uses os.Args[0].
+func Self() string {
+ return naiveSelf()
+}
+
+// Command returns *exec.Cmd which has Path as current binary.
+// For example if current binary is "docker" at "/usr/bin/", then cmd.Path will
+// be set to "/usr/bin/docker".
+func Command(args ...string) *exec.Cmd {
+ cmd := exec.Command(Self())
+ cmd.Args = args
+ return cmd
+}
+
+// CommandContext returns *exec.Cmd which has Path as current binary.
+func CommandContext(ctx context.Context, args ...string) *exec.Cmd {
+ cmd := exec.CommandContext(ctx, Self())
+ cmd.Args = args
+ return cmd
+}
diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go b/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go
new file mode 100644
index 0000000..9d93742
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go
@@ -0,0 +1,18 @@
+// +build !linux,!windows,!freebsd,!solaris,!darwin
+
+package reexec
+
+import (
+ "context"
+ "os/exec"
+)
+
+// Command is unsupported on operating systems apart from Linux, Windows, FreeBSD, Solaris and Darwin.
+func Command(args ...string) *exec.Cmd {
+ return nil
+}
+
+// CommandContext is unsupported on operating systems apart from Linux, Windows, FreeBSD, Solaris and Darwin.
+func CommandContext(ctx context.Context, args ...string) *exec.Cmd {
+ return nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_windows.go b/vendor/github.com/containers/storage/pkg/reexec/command_windows.go
new file mode 100644
index 0000000..673ab47
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/reexec/command_windows.go
@@ -0,0 +1,32 @@
+// +build windows
+
+package reexec
+
+import (
+ "context"
+ "os/exec"
+)
+
+// Self returns the path to the current process's binary.
+// Uses os.Args[0].
+func Self() string {
+ return naiveSelf()
+}
+
+// Command returns *exec.Cmd which has Path as current binary.
+// For example if current binary is "docker.exe" at "C:\", then cmd.Path will
+// be set to "C:\docker.exe".
+func Command(args ...string) *exec.Cmd {
+ cmd := exec.Command(Self())
+ cmd.Args = args
+ return cmd
+}
+
+// CommandContext returns *exec.Cmd which has Path as current binary.
+// For example if current binary is "docker.exe" at "C:\", then cmd.Path will
+// be set to "C:\docker.exe".
+func CommandContext(ctx context.Context, args ...string) *exec.Cmd {
+ cmd := exec.CommandContext(ctx, Self())
+ cmd.Args = args
+ return cmd
+}
diff --git a/vendor/github.com/containers/storage/pkg/reexec/reexec.go b/vendor/github.com/containers/storage/pkg/reexec/reexec.go
new file mode 100644
index 0000000..c56671d
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/reexec/reexec.go
@@ -0,0 +1,47 @@
+package reexec
+
+import (
+ "fmt"
+ "os"
+ "os/exec"
+ "path/filepath"
+)
+
+var registeredInitializers = make(map[string]func())
+
+// Register adds an initialization func under the specified name.
+func Register(name string, initializer func()) {
+ if _, exists := registeredInitializers[name]; exists {
+ panic(fmt.Sprintf("reexec func already registered under name %q", name))
+ }
+
+ registeredInitializers[name] = initializer
+}
+
+// Init is called as the first part of the exec process and returns true if an
+// initialization function was called.
+func Init() bool {
+ initializer, exists := registeredInitializers[os.Args[0]]
+ if exists {
+ initializer()
+
+ return true
+ }
+ return false
+}
+
+func naiveSelf() string {
+ name := os.Args[0]
+ if filepath.Base(name) == name {
+ if lp, err := exec.LookPath(name); err == nil {
+ return lp
+ }
+ }
+ // handle conversion of relative paths to absolute
+ if absName, err := filepath.Abs(name); err == nil {
+ return absName
+ }
+ // if we couldn't get absolute name, return original
+ // (NOTE: Go only errors on Abs() if os.Getwd fails)
+ return name
+}
diff --git a/vendor/github.com/containers/storage/pkg/stringid/README.md b/vendor/github.com/containers/storage/pkg/stringid/README.md
new file mode 100644
index 0000000..37a5098
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/stringid/README.md
@@ -0,0 +1 @@
+This package provides helper functions for dealing with string identifiers
diff --git a/vendor/github.com/containers/storage/pkg/stringid/stringid.go b/vendor/github.com/containers/storage/pkg/stringid/stringid.go
new file mode 100644
index 0000000..a0c7c42
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/stringid/stringid.go
@@ -0,0 +1,99 @@
+// Package stringid provides helper functions for dealing with string identifiers
+package stringid

+import (
+ cryptorand "crypto/rand"
+ "encoding/hex"
+ "fmt"
+ "io"
+ "math"
+ "math/big"
+ "math/rand"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+)
+
+const shortLen = 12
+
+var (
+ validShortID = regexp.MustCompile("^[a-f0-9]{12}$")
+ validHex = regexp.MustCompile(`^[a-f0-9]{64}$`)
+)
+
+// IsShortID determines if an arbitrary string *looks like* a short ID.
+func IsShortID(id string) bool {
+ return validShortID.MatchString(id)
+}
+
+// TruncateID returns a shorthand version of a string identifier for convenience.
+// A collision with other shorthands is very unlikely, but possible.
+// In case of a collision a lookup with TruncIndex.Get() will fail, and the caller
+// will need to use a longer prefix, or the full-length Id.
+func TruncateID(id string) string {
+ if i := strings.IndexRune(id, ':'); i >= 0 {
+ id = id[i+1:]
+ }
+ if len(id) > shortLen {
+ id = id[:shortLen]
+ }
+ return id
+}
+
+func generateID(r io.Reader) string {
+ b := make([]byte, 32)
+ for {
+ if _, err := io.ReadFull(r, b); err != nil {
+ panic(err) // This shouldn't happen
+ }
+ id := hex.EncodeToString(b)
+ // if we try to parse the truncated form as an int and we don't have
+ // an error then the value is all numeric and causes issues when
+ // used as a hostname. ref #3869
+ if _, err := strconv.ParseInt(TruncateID(id), 10, 64); err == nil {
+ continue
+ }
+ return id
+ }
+}
+
+// GenerateRandomID returns a unique id.
+func GenerateRandomID() string {
+ return generateID(cryptorand.Reader)
+}
+
+// GenerateNonCryptoID generates a unique id without using cryptographically
+// secure sources of random. This helps save entropy.
+func GenerateNonCryptoID() string {
+ return generateID(readerFunc(rand.Read))
+}
+
+// ValidateID checks whether an ID string is a valid image ID.
+func ValidateID(id string) error {
+ if ok := validHex.MatchString(id); !ok {
+ return fmt.Errorf("image ID %q is invalid", id)
+ }
+ return nil
+}
+
+func init() {
+ // safely set the seed globally so we generate random ids. Tries to use a
+ // crypto seed before falling back to time.
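+ // (The math/rand seed only affects GenerateNonCryptoID and other math/rand
+ // consumers; GenerateRandomID always reads crypto/rand directly.)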
+ var seed int64 + if cryptoseed, err := cryptorand.Int(cryptorand.Reader, big.NewInt(math.MaxInt64)); err != nil { + // This should not happen, but worst-case fallback to time-based seed. + seed = time.Now().UnixNano() + } else { + seed = cryptoseed.Int64() + } + + rand.Seed(seed) +} + +type readerFunc func(p []byte) (int, error) + +func (fn readerFunc) Read(p []byte) (int, error) { + return fn(p) +} diff --git a/vendor/github.com/containers/storage/pkg/stringutils/README.md b/vendor/github.com/containers/storage/pkg/stringutils/README.md new file mode 100644 index 0000000..b3e4545 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/stringutils/README.md @@ -0,0 +1 @@ +This package provides helper functions for dealing with strings diff --git a/vendor/github.com/containers/storage/pkg/stringutils/stringutils.go b/vendor/github.com/containers/storage/pkg/stringutils/stringutils.go new file mode 100644 index 0000000..8c4c398 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/stringutils/stringutils.go @@ -0,0 +1,99 @@ +// Package stringutils provides helper functions for dealing with strings. +package stringutils + +import ( + "bytes" + "math/rand" + "strings" +) + +// GenerateRandomAlphaOnlyString generates an alphabetical random string with length n. +func GenerateRandomAlphaOnlyString(n int) string { + // make a really long string + letters := []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") + b := make([]byte, n) + for i := range b { + b[i] = letters[rand.Intn(len(letters))] + } + return string(b) +} + +// GenerateRandomASCIIString generates an ASCII random string with length n. +func GenerateRandomASCIIString(n int) string { + chars := "abcdefghijklmnopqrstuvwxyz" + + "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + + "~!@#$%^&*()-_+={}[]\\|<,>.?/\"';:` " + res := make([]byte, n) + for i := 0; i < n; i++ { + res[i] = chars[rand.Intn(len(chars))] + } + return string(res) +} + +// Ellipsis truncates a string to fit within maxlen, and appends ellipsis (...). +// For maxlen of 3 and lower, no ellipsis is appended. +func Ellipsis(s string, maxlen int) string { + r := []rune(s) + if len(r) <= maxlen { + return s + } + if maxlen <= 3 { + return string(r[:maxlen]) + } + return string(r[:maxlen-3]) + "..." +} + +// Truncate truncates a string to maxlen. +func Truncate(s string, maxlen int) string { + r := []rune(s) + if len(r) <= maxlen { + return s + } + return string(r[:maxlen]) +} + +// InSlice tests whether a string is contained in a slice of strings or not. +// Comparison is case insensitive +func InSlice(slice []string, s string) bool { + for _, ss := range slice { + if strings.ToLower(s) == strings.ToLower(ss) { + return true + } + } + return false +} + +func quote(word string, buf *bytes.Buffer) { + // Bail out early for "simple" strings + if word != "" && !strings.ContainsAny(word, "\\'\"`${[|&;<>()~*?! 
\t\n") { + buf.WriteString(word) + return + } + + buf.WriteString("'") + + for i := 0; i < len(word); i++ { + b := word[i] + if b == '\'' { + // Replace literal ' with a close ', a \', and an open ' + buf.WriteString("'\\''") + } else { + buf.WriteByte(b) + } + } + + buf.WriteString("'") +} + +// ShellQuoteArguments takes a list of strings and escapes them so they will be +// handled right when passed as arguments to a program via a shell +func ShellQuoteArguments(args []string) string { + var buf bytes.Buffer + for i, arg := range args { + if i != 0 { + buf.WriteByte(' ') + } + quote(arg, &buf) + } + return buf.String() +} diff --git a/vendor/github.com/containers/storage/pkg/system/chtimes.go b/vendor/github.com/containers/storage/pkg/system/chtimes.go new file mode 100644 index 0000000..056d199 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/chtimes.go @@ -0,0 +1,35 @@ +package system + +import ( + "os" + "time" +) + +// Chtimes changes the access time and modified time of a file at the given path +func Chtimes(name string, atime time.Time, mtime time.Time) error { + unixMinTime := time.Unix(0, 0) + unixMaxTime := maxTime + + // If the modified time is prior to the Unix Epoch, or after the + // end of Unix Time, os.Chtimes has undefined behavior + // default to Unix Epoch in this case, just in case + + if atime.Before(unixMinTime) || atime.After(unixMaxTime) { + atime = unixMinTime + } + + if mtime.Before(unixMinTime) || mtime.After(unixMaxTime) { + mtime = unixMinTime + } + + if err := os.Chtimes(name, atime, mtime); err != nil { + return err + } + + // Take platform specific action for setting create time. + if err := setCTime(name, mtime); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/chtimes_unix.go b/vendor/github.com/containers/storage/pkg/system/chtimes_unix.go new file mode 100644 index 0000000..09d58bc --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/chtimes_unix.go @@ -0,0 +1,14 @@ +// +build !windows + +package system + +import ( + "time" +) + +//setCTime will set the create time on a file. On Unix, the create +//time is updated as a side effect of setting the modified time, so +//no action is required. +func setCTime(path string, ctime time.Time) error { + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/chtimes_windows.go b/vendor/github.com/containers/storage/pkg/system/chtimes_windows.go new file mode 100644 index 0000000..45428c1 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/chtimes_windows.go @@ -0,0 +1,28 @@ +// +build windows + +package system + +import ( + "time" + + "golang.org/x/sys/windows" +) + +//setCTime will set the create time on a file. On Windows, this requires +//calling SetFileTime and explicitly including the create time. 
+func setCTime(path string, ctime time.Time) error { + ctimespec := windows.NsecToTimespec(ctime.UnixNano()) + pathp, e := windows.UTF16PtrFromString(path) + if e != nil { + return e + } + h, e := windows.CreateFile(pathp, + windows.FILE_WRITE_ATTRIBUTES, windows.FILE_SHARE_WRITE, nil, + windows.OPEN_EXISTING, windows.FILE_FLAG_BACKUP_SEMANTICS, 0) + if e != nil { + return e + } + defer windows.Close(h) + c := windows.NsecToFiletime(windows.TimespecToNsec(ctimespec)) + return windows.SetFileTime(h, &c, nil, nil) +} diff --git a/vendor/github.com/containers/storage/pkg/system/errors.go b/vendor/github.com/containers/storage/pkg/system/errors.go new file mode 100644 index 0000000..2883189 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/errors.go @@ -0,0 +1,10 @@ +package system + +import ( + "errors" +) + +var ( + // ErrNotSupportedPlatform means the platform is not supported. + ErrNotSupportedPlatform = errors.New("platform and architecture is not supported") +) diff --git a/vendor/github.com/containers/storage/pkg/system/exitcode.go b/vendor/github.com/containers/storage/pkg/system/exitcode.go new file mode 100644 index 0000000..60f0514 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/exitcode.go @@ -0,0 +1,33 @@ +package system + +import ( + "fmt" + "os/exec" + "syscall" +) + +// GetExitCode returns the ExitStatus of the specified error if its type is +// exec.ExitError, returns 0 and an error otherwise. +func GetExitCode(err error) (int, error) { + exitCode := 0 + if exiterr, ok := err.(*exec.ExitError); ok { + if procExit, ok := exiterr.Sys().(syscall.WaitStatus); ok { + return procExit.ExitStatus(), nil + } + } + return exitCode, fmt.Errorf("failed to get exit code") +} + +// ProcessExitCode process the specified error and returns the exit status code +// if the error was of type exec.ExitError, returns nothing otherwise. +func ProcessExitCode(err error) (exitCode int) { + if err != nil { + var exiterr error + if exitCode, exiterr = GetExitCode(err); exiterr != nil { + // TODO: Fix this so we check the error's text. + // we've failed to retrieve exit code, so we set it to 127 + exitCode = 127 + } + } + return +} diff --git a/vendor/github.com/containers/storage/pkg/system/filesys.go b/vendor/github.com/containers/storage/pkg/system/filesys.go new file mode 100644 index 0000000..102565f --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/filesys.go @@ -0,0 +1,67 @@ +// +build !windows + +package system + +import ( + "io/ioutil" + "os" + "path/filepath" +) + +// MkdirAllWithACL is a wrapper for MkdirAll on unix systems. +func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error { + return MkdirAll(path, perm, sddl) +} + +// MkdirAll creates a directory named path along with any necessary parents, +// with permission specified by attribute perm for all dir created. +func MkdirAll(path string, perm os.FileMode, sddl string) error { + return os.MkdirAll(path, perm) +} + +// IsAbs is a platform-specific wrapper for filepath.IsAbs. +func IsAbs(path string) bool { + return filepath.IsAbs(path) +} + +// The functions below here are wrappers for the equivalents in the os and ioutils packages. +// They are passthrough on Unix platforms, and only relevant on Windows. + +// CreateSequential creates the named file with mode 0666 (before umask), truncating +// it if it already exists. If successful, methods on the returned +// File can be used for I/O; the associated file descriptor has mode +// O_RDWR. 
+// If there is an error, it will be of type *PathError. +func CreateSequential(name string) (*os.File, error) { + return os.Create(name) +} + +// OpenSequential opens the named file for reading. If successful, methods on +// the returned file can be used for reading; the associated file +// descriptor has mode O_RDONLY. +// If there is an error, it will be of type *PathError. +func OpenSequential(name string) (*os.File, error) { + return os.Open(name) +} + +// OpenFileSequential is the generalized open call; most users will use Open +// or Create instead. It opens the named file with specified flag +// (O_RDONLY etc.) and perm, (0666 etc.) if applicable. If successful, +// methods on the returned File can be used for I/O. +// If there is an error, it will be of type *PathError. +func OpenFileSequential(name string, flag int, perm os.FileMode) (*os.File, error) { + return os.OpenFile(name, flag, perm) +} + +// TempFileSequential creates a new temporary file in the directory dir +// with a name beginning with prefix, opens the file for reading +// and writing, and returns the resulting *os.File. +// If dir is the empty string, TempFile uses the default directory +// for temporary files (see os.TempDir). +// Multiple programs calling TempFile simultaneously +// will not choose the same file. The caller can use f.Name() +// to find the pathname of the file. It is the caller's responsibility +// to remove the file when no longer needed. +func TempFileSequential(dir, prefix string) (f *os.File, err error) { + return ioutil.TempFile(dir, prefix) +} diff --git a/vendor/github.com/containers/storage/pkg/system/filesys_windows.go b/vendor/github.com/containers/storage/pkg/system/filesys_windows.go new file mode 100644 index 0000000..a61b53d --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/filesys_windows.go @@ -0,0 +1,298 @@ +// +build windows + +package system + +import ( + "os" + "path/filepath" + "regexp" + "strconv" + "strings" + "sync" + "syscall" + "time" + "unsafe" + + winio "github.com/Microsoft/go-winio" + "golang.org/x/sys/windows" +) + +const ( + // SddlAdministratorsLocalSystem is local administrators plus NT AUTHORITY\System + SddlAdministratorsLocalSystem = "D:P(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)" + // SddlNtvmAdministratorsLocalSystem is NT VIRTUAL MACHINE\Virtual Machines plus local administrators plus NT AUTHORITY\System + SddlNtvmAdministratorsLocalSystem = "D:P(A;OICI;GA;;;S-1-5-83-0)(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)" +) + +// MkdirAllWithACL is a wrapper for MkdirAll that creates a directory +// with an appropriate SDDL defined ACL. +func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error { + return mkdirall(path, true, sddl) +} + +// MkdirAll implementation that is volume path aware for Windows. +func MkdirAll(path string, _ os.FileMode, sddl string) error { + return mkdirall(path, false, sddl) +} + +// mkdirall is a custom version of os.MkdirAll modified for use on Windows +// so that it is both volume path aware, and can create a directory with +// a DACL. +func mkdirall(path string, applyACL bool, sddl string) error { + if re := regexp.MustCompile(`^\\\\\?\\Volume{[a-z0-9-]+}$`); re.MatchString(path) { + return nil + } + + // The rest of this method is largely copied from os.MkdirAll and should be kept + // as-is to ensure compatibility. + + // Fast path: if we can tell whether path is a directory or file, stop with success or error. 
+ dir, err := os.Stat(path) + if err == nil { + if dir.IsDir() { + return nil + } + return &os.PathError{ + Op: "mkdir", + Path: path, + Err: syscall.ENOTDIR, + } + } + + // Slow path: make sure parent exists and then call Mkdir for path. + i := len(path) + for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator. + i-- + } + + j := i + for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element. + j-- + } + + if j > 1 { + // Create parent + err = mkdirall(path[0:j-1], false, sddl) + if err != nil { + return err + } + } + + // Parent now exists; invoke os.Mkdir or mkdirWithACL and use its result. + if applyACL { + err = mkdirWithACL(path, sddl) + } else { + err = os.Mkdir(path, 0) + } + + if err != nil { + // Handle arguments like "foo/." by + // double-checking that directory doesn't exist. + dir, err1 := os.Lstat(path) + if err1 == nil && dir.IsDir() { + return nil + } + return err + } + return nil +} + +// mkdirWithACL creates a new directory. If there is an error, it will be of +// type *PathError. . +// +// This is a modified and combined version of os.Mkdir and windows.Mkdir +// in golang to cater for creating a directory am ACL permitting full +// access, with inheritance, to any subfolder/file for Built-in Administrators +// and Local System. +func mkdirWithACL(name string, sddl string) error { + sa := windows.SecurityAttributes{Length: 0} + sd, err := winio.SddlToSecurityDescriptor(sddl) + if err != nil { + return &os.PathError{Op: "mkdir", Path: name, Err: err} + } + sa.Length = uint32(unsafe.Sizeof(sa)) + sa.InheritHandle = 1 + sa.SecurityDescriptor = uintptr(unsafe.Pointer(&sd[0])) + + namep, err := windows.UTF16PtrFromString(name) + if err != nil { + return &os.PathError{Op: "mkdir", Path: name, Err: err} + } + + e := windows.CreateDirectory(namep, &sa) + if e != nil { + return &os.PathError{Op: "mkdir", Path: name, Err: e} + } + return nil +} + +// IsAbs is a platform-specific wrapper for filepath.IsAbs. On Windows, +// golang filepath.IsAbs does not consider a path \windows\system32 as absolute +// as it doesn't start with a drive-letter/colon combination. However, in +// docker we need to verify things such as WORKDIR /windows/system32 in +// a Dockerfile (which gets translated to \windows\system32 when being processed +// by the daemon. This SHOULD be treated as absolute from a docker processing +// perspective. +func IsAbs(path string) bool { + if !filepath.IsAbs(path) { + if !strings.HasPrefix(path, string(os.PathSeparator)) { + return false + } + } + return true +} + +// The origin of the functions below here are the golang OS and windows packages, +// slightly modified to only cope with files, not directories due to the +// specific use case. +// +// The alteration is to allow a file on Windows to be opened with +// FILE_FLAG_SEQUENTIAL_SCAN (particular for docker load), to avoid eating +// the standby list, particularly when accessing large files such as layer.tar. + +// CreateSequential creates the named file with mode 0666 (before umask), truncating +// it if it already exists. If successful, methods on the returned +// File can be used for I/O; the associated file descriptor has mode +// O_RDWR. +// If there is an error, it will be of type *PathError. +func CreateSequential(name string) (*os.File, error) { + return OpenFileSequential(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0) +} + +// OpenSequential opens the named file for reading. 
If successful, methods on +// the returned file can be used for reading; the associated file +// descriptor has mode O_RDONLY. +// If there is an error, it will be of type *PathError. +func OpenSequential(name string) (*os.File, error) { + return OpenFileSequential(name, os.O_RDONLY, 0) +} + +// OpenFileSequential is the generalized open call; most users will use Open +// or Create instead. +// If there is an error, it will be of type *PathError. +func OpenFileSequential(name string, flag int, _ os.FileMode) (*os.File, error) { + if name == "" { + return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOENT} + } + r, errf := windowsOpenFileSequential(name, flag, 0) + if errf == nil { + return r, nil + } + return nil, &os.PathError{Op: "open", Path: name, Err: errf} +} + +func windowsOpenFileSequential(name string, flag int, _ os.FileMode) (file *os.File, err error) { + r, e := windowsOpenSequential(name, flag|windows.O_CLOEXEC, 0) + if e != nil { + return nil, e + } + return os.NewFile(uintptr(r), name), nil +} + +func makeInheritSa() *windows.SecurityAttributes { + var sa windows.SecurityAttributes + sa.Length = uint32(unsafe.Sizeof(sa)) + sa.InheritHandle = 1 + return &sa +} + +func windowsOpenSequential(path string, mode int, _ uint32) (fd windows.Handle, err error) { + if len(path) == 0 { + return windows.InvalidHandle, windows.ERROR_FILE_NOT_FOUND + } + pathp, err := windows.UTF16PtrFromString(path) + if err != nil { + return windows.InvalidHandle, err + } + var access uint32 + switch mode & (windows.O_RDONLY | windows.O_WRONLY | windows.O_RDWR) { + case windows.O_RDONLY: + access = windows.GENERIC_READ + case windows.O_WRONLY: + access = windows.GENERIC_WRITE + case windows.O_RDWR: + access = windows.GENERIC_READ | windows.GENERIC_WRITE + } + if mode&windows.O_CREAT != 0 { + access |= windows.GENERIC_WRITE + } + if mode&windows.O_APPEND != 0 { + access &^= windows.GENERIC_WRITE + access |= windows.FILE_APPEND_DATA + } + sharemode := uint32(windows.FILE_SHARE_READ | windows.FILE_SHARE_WRITE) + var sa *windows.SecurityAttributes + if mode&windows.O_CLOEXEC == 0 { + sa = makeInheritSa() + } + var createmode uint32 + switch { + case mode&(windows.O_CREAT|windows.O_EXCL) == (windows.O_CREAT | windows.O_EXCL): + createmode = windows.CREATE_NEW + case mode&(windows.O_CREAT|windows.O_TRUNC) == (windows.O_CREAT | windows.O_TRUNC): + createmode = windows.CREATE_ALWAYS + case mode&windows.O_CREAT == windows.O_CREAT: + createmode = windows.OPEN_ALWAYS + case mode&windows.O_TRUNC == windows.O_TRUNC: + createmode = windows.TRUNCATE_EXISTING + default: + createmode = windows.OPEN_EXISTING + } + // Use FILE_FLAG_SEQUENTIAL_SCAN rather than FILE_ATTRIBUTE_NORMAL as implemented in golang. + //https://msdn.microsoft.com/en-us/library/windows/desktop/aa363858(v=vs.85).aspx + const fileFlagSequentialScan = 0x08000000 // FILE_FLAG_SEQUENTIAL_SCAN + h, e := windows.CreateFile(pathp, access, sharemode, sa, createmode, fileFlagSequentialScan, 0) + return h, e +} + +// Helpers for TempFileSequential +var rand uint32 +var randmu sync.Mutex + +func reseed() uint32 { + return uint32(time.Now().UnixNano() + int64(os.Getpid())) +} +func nextSuffix() string { + randmu.Lock() + r := rand + if r == 0 { + r = reseed() + } + r = r*1664525 + 1013904223 // constants from Numerical Recipes + rand = r + randmu.Unlock() + return strconv.Itoa(int(1e9 + r%1e9))[1:] +} + +// TempFileSequential is a copy of ioutil.TempFile, modified to use sequential +// file access. 
Below is the original comment from golang: +// TempFile creates a new temporary file in the directory dir +// with a name beginning with prefix, opens the file for reading +// and writing, and returns the resulting *os.File. +// If dir is the empty string, TempFile uses the default directory +// for temporary files (see os.TempDir). +// Multiple programs calling TempFile simultaneously +// will not choose the same file. The caller can use f.Name() +// to find the pathname of the file. It is the caller's responsibility +// to remove the file when no longer needed. +func TempFileSequential(dir, prefix string) (f *os.File, err error) { + if dir == "" { + dir = os.TempDir() + } + + nconflict := 0 + for i := 0; i < 10000; i++ { + name := filepath.Join(dir, prefix+nextSuffix()) + f, err = OpenFileSequential(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) + if os.IsExist(err) { + if nconflict++; nconflict > 10 { + randmu.Lock() + rand = reseed() + randmu.Unlock() + } + continue + } + break + } + return +} diff --git a/vendor/github.com/containers/storage/pkg/system/init.go b/vendor/github.com/containers/storage/pkg/system/init.go new file mode 100644 index 0000000..1793508 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/init.go @@ -0,0 +1,22 @@ +package system + +import ( + "syscall" + "time" + "unsafe" +) + +// Used by chtimes +var maxTime time.Time + +func init() { + // chtimes initialization + if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 { + // This is a 64 bit timespec + // os.Chtimes limits time to the following + maxTime = time.Unix(0, 1<<63-1) + } else { + // This is a 32 bit timespec + maxTime = time.Unix(1<<31-1, 0) + } +} diff --git a/vendor/github.com/containers/storage/pkg/system/init_windows.go b/vendor/github.com/containers/storage/pkg/system/init_windows.go new file mode 100644 index 0000000..019c664 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/init_windows.go @@ -0,0 +1,17 @@ +package system + +import "os" + +// LCOWSupported determines if Linux Containers on Windows are supported. +// Note: This feature is in development (06/17) and enabled through an +// environment variable. At a future time, it will be enabled based +// on build number. @jhowardmsft +var lcowSupported = false + +func init() { + // LCOW initialization + if os.Getenv("LCOW_SUPPORTED") != "" { + lcowSupported = true + } + +} diff --git a/vendor/github.com/containers/storage/pkg/system/lcow_unix.go b/vendor/github.com/containers/storage/pkg/system/lcow_unix.go new file mode 100644 index 0000000..cff33bb --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/lcow_unix.go @@ -0,0 +1,8 @@ +// +build !windows + +package system + +// LCOWSupported returns true if Linux containers on Windows are supported. +func LCOWSupported() bool { + return false +} diff --git a/vendor/github.com/containers/storage/pkg/system/lcow_windows.go b/vendor/github.com/containers/storage/pkg/system/lcow_windows.go new file mode 100644 index 0000000..e54d01e --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/lcow_windows.go @@ -0,0 +1,6 @@ +package system + +// LCOWSupported returns true if Linux containers on Windows are supported. 
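+// (lcowSupported is set during startup by init_windows.go when the
+// LCOW_SUPPORTED environment variable is non-empty.)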
+func LCOWSupported() bool { + return lcowSupported +} diff --git a/vendor/github.com/containers/storage/pkg/system/lstat_unix.go b/vendor/github.com/containers/storage/pkg/system/lstat_unix.go new file mode 100644 index 0000000..bd23c4d --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/lstat_unix.go @@ -0,0 +1,19 @@ +// +build !windows + +package system + +import ( + "syscall" +) + +// Lstat takes a path to a file and returns +// a system.StatT type pertaining to that file. +// +// Throws an error if the file does not exist +func Lstat(path string) (*StatT, error) { + s := &syscall.Stat_t{} + if err := syscall.Lstat(path, s); err != nil { + return nil, err + } + return fromStatT(s) +} diff --git a/vendor/github.com/containers/storage/pkg/system/lstat_windows.go b/vendor/github.com/containers/storage/pkg/system/lstat_windows.go new file mode 100644 index 0000000..e51df0d --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/lstat_windows.go @@ -0,0 +1,14 @@ +package system + +import "os" + +// Lstat calls os.Lstat to get a fileinfo interface back. +// This is then copied into our own locally defined structure. +func Lstat(path string) (*StatT, error) { + fi, err := os.Lstat(path) + if err != nil { + return nil, err + } + + return fromStatT(&fi) +} diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo.go b/vendor/github.com/containers/storage/pkg/system/meminfo.go new file mode 100644 index 0000000..3b6e947 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/meminfo.go @@ -0,0 +1,17 @@ +package system + +// MemInfo contains memory statistics of the host system. +type MemInfo struct { + // Total usable RAM (i.e. physical RAM minus a few reserved bits and the + // kernel binary code). + MemTotal int64 + + // Amount of free memory. + MemFree int64 + + // Total amount of swap space available. + SwapTotal int64 + + // Amount of swap space that is currently unused. + SwapFree int64 +} diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo_linux.go b/vendor/github.com/containers/storage/pkg/system/meminfo_linux.go new file mode 100644 index 0000000..385f1d5 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/meminfo_linux.go @@ -0,0 +1,65 @@ +package system + +import ( + "bufio" + "io" + "os" + "strconv" + "strings" + + "github.com/docker/go-units" +) + +// ReadMemInfo retrieves memory statistics of the host system and returns a +// MemInfo type. +func ReadMemInfo() (*MemInfo, error) { + file, err := os.Open("/proc/meminfo") + if err != nil { + return nil, err + } + defer file.Close() + return parseMemInfo(file) +} + +// parseMemInfo parses the /proc/meminfo file into +// a MemInfo object given an io.Reader to the file. +// Throws error if there are problems reading from the file +func parseMemInfo(reader io.Reader) (*MemInfo, error) { + meminfo := &MemInfo{} + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + // Expected format: ["MemTotal:", "1234", "kB"] + parts := strings.Fields(scanner.Text()) + + // Sanity checks: Skip malformed entries. + if len(parts) < 3 || parts[2] != "kB" { + continue + } + + // Convert to bytes. 
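+ // e.g. "MemTotal: 16344972 kB" becomes 16344972 * 1024 bytes.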
+ size, err := strconv.Atoi(parts[1])
+ if err != nil {
+ continue
+ }
+ bytes := int64(size) * units.KiB
+
+ switch parts[0] {
+ case "MemTotal:":
+ meminfo.MemTotal = bytes
+ case "MemFree:":
+ meminfo.MemFree = bytes
+ case "SwapTotal:":
+ meminfo.SwapTotal = bytes
+ case "SwapFree:":
+ meminfo.SwapFree = bytes
+ }
+
+ }
+
+ // Handle errors that may have occurred during the reading of the file.
+ if err := scanner.Err(); err != nil {
+ return nil, err
+ }
+
+ return meminfo, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go b/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go
new file mode 100644
index 0000000..925776e
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go
@@ -0,0 +1,129 @@
+// +build solaris,cgo
+
+package system
+
+import (
+ "fmt"
+ "unsafe"
+)
+
+// #cgo CFLAGS: -std=c99
+// #cgo LDFLAGS: -lkstat
+// #include <unistd.h>
+// #include <stdlib.h>
+// #include <stdio.h>
+// #include <kstat.h>
+// #include <sys/swap.h>
+// #include <sys/param.h>
+// struct swaptable *allocSwaptable(int num) {
+// struct swaptable *st;
+// struct swapent *swapent;
+// st = (struct swaptable *)malloc(num * sizeof(swapent_t) + sizeof (int));
+// swapent = st->swt_ent;
+// for (int i = 0; i < num; i++,swapent++) {
+// swapent->ste_path = (char *)malloc(MAXPATHLEN * sizeof (char));
+// }
+// st->swt_n = num;
+// return st;
+//}
+// void freeSwaptable (struct swaptable *st) {
+// struct swapent *swapent = st->swt_ent;
+// for (int i = 0; i < st->swt_n; i++,swapent++) {
+// free(swapent->ste_path);
+// }
+// free(st);
+// }
+// swapent_t getSwapEnt(swapent_t *ent, int i) {
+// return ent[i];
+// }
+// int64_t getPpKernel() {
+// int64_t pp_kernel = 0;
+// kstat_ctl_t *ksc;
+// kstat_t *ks;
+// kstat_named_t *knp;
+// kid_t kid;
+//
+// if ((ksc = kstat_open()) == NULL) {
+// return -1;
+// }
+// if ((ks = kstat_lookup(ksc, "unix", 0, "system_pages")) == NULL) {
+// return -1;
+// }
+// if (((kid = kstat_read(ksc, ks, NULL)) == -1) ||
+// ((knp = kstat_data_lookup(ks, "pp_kernel")) == NULL)) {
+// return -1;
+// }
+// switch (knp->data_type) {
+// case KSTAT_DATA_UINT64:
+// pp_kernel = knp->value.ui64;
+// break;
+// case KSTAT_DATA_UINT32:
+// pp_kernel = knp->value.ui32;
+// break;
+// }
+// pp_kernel *= sysconf(_SC_PAGESIZE);
+// return (pp_kernel > 0 ? pp_kernel : -1);
+// }
+import "C"
+
+// Get the system memory info using sysconf same as prtconf
+func getTotalMem() int64 {
+ pagesize := C.sysconf(C._SC_PAGESIZE)
+ npages := C.sysconf(C._SC_PHYS_PAGES)
+ return int64(pagesize * npages)
+}
+
+func getFreeMem() int64 {
+ pagesize := C.sysconf(C._SC_PAGESIZE)
+ npages := C.sysconf(C._SC_AVPHYS_PAGES)
+ return int64(pagesize * npages)
+}
+
+// ReadMemInfo retrieves memory statistics of the host system and returns a
+// MemInfo type.
+func ReadMemInfo() (*MemInfo, error) { + + ppKernel := C.getPpKernel() + MemTotal := getTotalMem() + MemFree := getFreeMem() + SwapTotal, SwapFree, err := getSysSwap() + + if ppKernel < 0 || MemTotal < 0 || MemFree < 0 || SwapTotal < 0 || + SwapFree < 0 { + return nil, fmt.Errorf("error getting system memory info %v\n", err) + } + + meminfo := &MemInfo{} + // Total memory is total physical memory less than memory locked by kernel + meminfo.MemTotal = MemTotal - int64(ppKernel) + meminfo.MemFree = MemFree + meminfo.SwapTotal = SwapTotal + meminfo.SwapFree = SwapFree + + return meminfo, nil +} + +func getSysSwap() (int64, int64, error) { + var tSwap int64 + var fSwap int64 + var diskblksPerPage int64 + num, err := C.swapctl(C.SC_GETNSWP, nil) + if err != nil { + return -1, -1, err + } + st := C.allocSwaptable(num) + _, err = C.swapctl(C.SC_LIST, unsafe.Pointer(st)) + if err != nil { + C.freeSwaptable(st) + return -1, -1, err + } + + diskblksPerPage = int64(C.sysconf(C._SC_PAGESIZE) >> C.DEV_BSHIFT) + for i := 0; i < int(num); i++ { + swapent := C.getSwapEnt(&st.swt_ent[0], C.int(i)) + tSwap += int64(swapent.ste_pages) * diskblksPerPage + fSwap += int64(swapent.ste_free) * diskblksPerPage + } + C.freeSwaptable(st) + return tSwap, fSwap, nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo_unsupported.go b/vendor/github.com/containers/storage/pkg/system/meminfo_unsupported.go new file mode 100644 index 0000000..3ce019d --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/meminfo_unsupported.go @@ -0,0 +1,8 @@ +// +build !linux,!windows,!solaris + +package system + +// ReadMemInfo is not supported on platforms other than linux and windows. +func ReadMemInfo() (*MemInfo, error) { + return nil, ErrNotSupportedPlatform +} diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo_windows.go b/vendor/github.com/containers/storage/pkg/system/meminfo_windows.go new file mode 100644 index 0000000..883944a --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/meminfo_windows.go @@ -0,0 +1,45 @@ +package system + +import ( + "unsafe" + + "golang.org/x/sys/windows" +) + +var ( + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + + procGlobalMemoryStatusEx = modkernel32.NewProc("GlobalMemoryStatusEx") +) + +// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366589(v=vs.85).aspx +// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366770(v=vs.85).aspx +type memorystatusex struct { + dwLength uint32 + dwMemoryLoad uint32 + ullTotalPhys uint64 + ullAvailPhys uint64 + ullTotalPageFile uint64 + ullAvailPageFile uint64 + ullTotalVirtual uint64 + ullAvailVirtual uint64 + ullAvailExtendedVirtual uint64 +} + +// ReadMemInfo retrieves memory statistics of the host system and returns a +// MemInfo type. 
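+// GlobalMemoryStatusEx requires dwLength to be pre-set to the size of the
+// structure (64 bytes for the layout above) and returns zero on failure.
+// Note that on failure the vendored code below returns an empty MemInfo
+// with a nil error rather than reporting the failure (editor's note).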
+func ReadMemInfo() (*MemInfo, error) { + msi := &memorystatusex{ + dwLength: 64, + } + r1, _, _ := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(msi))) + if r1 == 0 { + return &MemInfo{}, nil + } + return &MemInfo{ + MemTotal: int64(msi.ullTotalPhys), + MemFree: int64(msi.ullAvailPhys), + SwapTotal: int64(msi.ullTotalPageFile), + SwapFree: int64(msi.ullAvailPageFile), + }, nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/mknod.go b/vendor/github.com/containers/storage/pkg/system/mknod.go new file mode 100644 index 0000000..af79a65 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/mknod.go @@ -0,0 +1,22 @@ +// +build !windows + +package system + +import ( + "golang.org/x/sys/unix" +) + +// Mknod creates a filesystem node (file, device special file or named pipe) named path +// with attributes specified by mode and dev. +func Mknod(path string, mode uint32, dev int) error { + return unix.Mknod(path, mode, dev) +} + +// Mkdev is used to build the value of linux devices (in /dev/) which specifies major +// and minor number of the newly created device special file. +// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes. +// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major, +// then the top 12 bits of the minor. +func Mkdev(major int64, minor int64) uint32 { + return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff)) +} diff --git a/vendor/github.com/containers/storage/pkg/system/mknod_windows.go b/vendor/github.com/containers/storage/pkg/system/mknod_windows.go new file mode 100644 index 0000000..2e863c0 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/mknod_windows.go @@ -0,0 +1,13 @@ +// +build windows + +package system + +// Mknod is not implemented on Windows. +func Mknod(path string, mode uint32, dev int) error { + return ErrNotSupportedPlatform +} + +// Mkdev is not implemented on Windows. +func Mkdev(major int64, minor int64) uint32 { + panic("Mkdev not implemented on Windows.") +} diff --git a/vendor/github.com/containers/storage/pkg/system/path.go b/vendor/github.com/containers/storage/pkg/system/path.go new file mode 100644 index 0000000..f634a6b --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/path.go @@ -0,0 +1,21 @@ +package system + +import "runtime" + +const defaultUnixPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + +// DefaultPathEnv is unix style list of directories to search for +// executables. Each directory is separated from the next by a colon +// ':' character . +func DefaultPathEnv(platform string) string { + if runtime.GOOS == "windows" { + if platform != runtime.GOOS && LCOWSupported() { + return defaultUnixPathEnv + } + // Deliberately empty on Windows containers on Windows as the default path will be set by + // the container. Docker has no context of what the default path should be. + return "" + } + return defaultUnixPathEnv + +} diff --git a/vendor/github.com/containers/storage/pkg/system/path_unix.go b/vendor/github.com/containers/storage/pkg/system/path_unix.go new file mode 100644 index 0000000..f3762e6 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/path_unix.go @@ -0,0 +1,9 @@ +// +build !windows + +package system + +// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter, +// is the system drive. This is a no-op on Linux. 
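+// For illustration (editor's note): "/var/lib" is returned as "/var/lib";
+// see the Windows variant below for the inputs that can fail.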
+func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) { + return path, nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/path_windows.go b/vendor/github.com/containers/storage/pkg/system/path_windows.go new file mode 100644 index 0000000..aab8915 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/path_windows.go @@ -0,0 +1,33 @@ +// +build windows + +package system + +import ( + "fmt" + "path/filepath" + "strings" +) + +// CheckSystemDriveAndRemoveDriveLetter verifies and manipulates a Windows path. +// This is used, for example, when validating a user provided path in docker cp. +// If a drive letter is supplied, it must be the system drive. The drive letter +// is always removed. Also, it translates it to OS semantics (IOW / to \). We +// need the path in this syntax so that it can ultimately be concatenated with +// a Windows long-path which doesn't support drive-letters. Examples: +// C: --> Fail +// C:\ --> \ +// a --> a +// /a --> \a +// d:\ --> Fail +func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) { + if len(path) == 2 && string(path[1]) == ":" { + return "", fmt.Errorf("No relative path specified in %q", path) + } + if !filepath.IsAbs(path) || len(path) < 2 { + return filepath.FromSlash(path), nil + } + if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") { + return "", fmt.Errorf("The specified path is not on the system drive (C:)") + } + return filepath.FromSlash(path[2:]), nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/process_unix.go b/vendor/github.com/containers/storage/pkg/system/process_unix.go new file mode 100644 index 0000000..26c8b42 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/process_unix.go @@ -0,0 +1,24 @@ +// +build linux freebsd solaris darwin + +package system + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +// IsProcessAlive returns true if process with a given pid is running. +func IsProcessAlive(pid int) bool { + err := unix.Kill(pid, syscall.Signal(0)) + if err == nil || err == unix.EPERM { + return true + } + + return false +} + +// KillProcess force-stops a process. +func KillProcess(pid int) { + unix.Kill(pid, unix.SIGKILL) +} diff --git a/vendor/github.com/containers/storage/pkg/system/rm.go b/vendor/github.com/containers/storage/pkg/system/rm.go new file mode 100644 index 0000000..fc03c3e --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/rm.go @@ -0,0 +1,80 @@ +package system + +import ( + "os" + "syscall" + "time" + + "github.com/containers/storage/pkg/mount" + "github.com/pkg/errors" +) + +// EnsureRemoveAll wraps `os.RemoveAll` to check for specific errors that can +// often be remedied. +// Only use `EnsureRemoveAll` if you really want to make every effort to remove +// a directory. +// +// Because of the way `os.Remove` (and by extension `os.RemoveAll`) works, there +// can be a race between reading directory entries and then actually attempting +// to remove everything in the directory. +// These types of errors do not need to be returned since it's ok for the dir to +// be gone we can just retry the remove operation. 
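+// (For example, a file inside dir may be unlinked by another process after
+// the directory entries have been read but before os.RemoveAll gets to it;
+// the resulting "not exist" error is harmless and the removal is retried.)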
+// +// This should not return a `os.ErrNotExist` kind of error under any circumstances +func EnsureRemoveAll(dir string) error { + notExistErr := make(map[string]bool) + + // track retries + exitOnErr := make(map[string]int) + maxRetry := 5 + + // Attempt to unmount anything beneath this dir first + mount.RecursiveUnmount(dir) + + for { + err := os.RemoveAll(dir) + if err == nil { + return err + } + + pe, ok := err.(*os.PathError) + if !ok { + return err + } + + if os.IsNotExist(err) { + if notExistErr[pe.Path] { + return err + } + notExistErr[pe.Path] = true + + // There is a race where some subdir can be removed but after the parent + // dir entries have been read. + // So the path could be from `os.Remove(subdir)` + // If the reported non-existent path is not the passed in `dir` we + // should just retry, but otherwise return with no error. + if pe.Path == dir { + return nil + } + continue + } + + if pe.Err != syscall.EBUSY { + return err + } + + if mounted, _ := mount.Mounted(pe.Path); mounted { + if e := mount.Unmount(pe.Path); e != nil { + if mounted, _ := mount.Mounted(pe.Path); mounted { + return errors.Wrapf(e, "error while removing %s", dir) + } + } + } + + if exitOnErr[pe.Path] == maxRetry { + return err + } + exitOnErr[pe.Path]++ + time.Sleep(100 * time.Millisecond) + } +} diff --git a/vendor/github.com/containers/storage/pkg/system/stat_darwin.go b/vendor/github.com/containers/storage/pkg/system/stat_darwin.go new file mode 100644 index 0000000..715f05b --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/stat_darwin.go @@ -0,0 +1,13 @@ +package system + +import "syscall" + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtimespec}, nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/stat_freebsd.go b/vendor/github.com/containers/storage/pkg/system/stat_freebsd.go new file mode 100644 index 0000000..715f05b --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/stat_freebsd.go @@ -0,0 +1,13 @@ +package system + +import "syscall" + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtimespec}, nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/stat_linux.go b/vendor/github.com/containers/storage/pkg/system/stat_linux.go new file mode 100644 index 0000000..1939f95 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/stat_linux.go @@ -0,0 +1,19 @@ +package system + +import "syscall" + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: s.Mode, + uid: s.Uid, + gid: s.Gid, + rdev: s.Rdev, + mtim: s.Mtim}, nil +} + +// FromStatT converts a syscall.Stat_t type to a system.Stat_t type +// This is exposed on Linux as pkg/archive/changes uses it. 
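+// For illustration (editor's note, not part of the vendored source):
+//
+//	var st syscall.Stat_t
+//	if err := syscall.Lstat("/etc/hosts", &st); err == nil {
+//		s, _ := system.FromStatT(&st)
+//		_ = s.Size()
+//	}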
+func FromStatT(s *syscall.Stat_t) (*StatT, error) { + return fromStatT(s) +} diff --git a/vendor/github.com/containers/storage/pkg/system/stat_openbsd.go b/vendor/github.com/containers/storage/pkg/system/stat_openbsd.go new file mode 100644 index 0000000..b607dea --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/stat_openbsd.go @@ -0,0 +1,13 @@ +package system + +import "syscall" + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtim}, nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/stat_solaris.go b/vendor/github.com/containers/storage/pkg/system/stat_solaris.go new file mode 100644 index 0000000..b607dea --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/stat_solaris.go @@ -0,0 +1,13 @@ +package system + +import "syscall" + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtim}, nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/stat_unix.go b/vendor/github.com/containers/storage/pkg/system/stat_unix.go new file mode 100644 index 0000000..f9a1b48 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/stat_unix.go @@ -0,0 +1,72 @@ +// +build !windows + +package system + +import ( + "syscall" +) + +// StatT type contains status of a file. It contains metadata +// like permission, owner, group, size, etc about a file. +type StatT struct { + mode uint32 + uid uint32 + gid uint32 + rdev uint64 + size int64 + mtim syscall.Timespec +} + +// Mode returns file's permission mode. +func (s StatT) Mode() uint32 { + return s.mode +} + +// UID returns file's user id of owner. +func (s StatT) UID() uint32 { + return s.uid +} + +// GID returns file's group id of owner. +func (s StatT) GID() uint32 { + return s.gid +} + +// Rdev returns file's device ID (if it's special file). +func (s StatT) Rdev() uint64 { + return s.rdev +} + +// Size returns file's size. +func (s StatT) Size() int64 { + return s.size +} + +// Mtim returns file's last modification time. +func (s StatT) Mtim() syscall.Timespec { + return s.mtim +} + +// Stat takes a path to a file and returns +// a system.StatT type pertaining to that file. +// +// Throws an error if the file does not exist +func Stat(path string) (*StatT, error) { + s := &syscall.Stat_t{} + if err := syscall.Stat(path, s); err != nil { + return nil, err + } + return fromStatT(s) +} + +// Fstat takes an open file descriptor and returns +// a system.StatT type pertaining to that file. +// +// Throws an error if the file descriptor is invalid +func Fstat(fd int) (*StatT, error) { + s := &syscall.Stat_t{} + if err := syscall.Fstat(fd, s); err != nil { + return nil, err + } + return fromStatT(s) +} diff --git a/vendor/github.com/containers/storage/pkg/system/stat_windows.go b/vendor/github.com/containers/storage/pkg/system/stat_windows.go new file mode 100644 index 0000000..d306360 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/stat_windows.go @@ -0,0 +1,63 @@ +package system + +import ( + "os" + "time" +) + +// StatT type contains status of a file. It contains metadata +// like permission, size, etc about a file. 
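+// Unlike the unix variant, the Windows StatT carries no uid/gid/rdev
+// fields; the UID() and GID() accessors below always return 0
+// (editor's note, not part of the vendored source).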
+type StatT struct { + mode os.FileMode + size int64 + mtim time.Time +} + +// Size returns file's size. +func (s StatT) Size() int64 { + return s.size +} + +// Mode returns file's permission mode. +func (s StatT) Mode() os.FileMode { + return os.FileMode(s.mode) +} + +// Mtim returns file's last modification time. +func (s StatT) Mtim() time.Time { + return time.Time(s.mtim) +} + +// UID returns file's user id of owner. +// +// on windows this is always 0 because there is no concept of UID +func (s StatT) UID() uint32 { + return 0 +} + +// GID returns file's group id of owner. +// +// on windows this is always 0 because there is no concept of GID +func (s StatT) GID() uint32 { + return 0 +} + +// Stat takes a path to a file and returns +// a system.StatT type pertaining to that file. +// +// Throws an error if the file does not exist +func Stat(path string) (*StatT, error) { + fi, err := os.Stat(path) + if err != nil { + return nil, err + } + return fromStatT(&fi) +} + +// fromStatT converts a os.FileInfo type to a system.StatT type +func fromStatT(fi *os.FileInfo) (*StatT, error) { + return &StatT{ + size: (*fi).Size(), + mode: (*fi).Mode(), + mtim: (*fi).ModTime()}, nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/syscall_unix.go b/vendor/github.com/containers/storage/pkg/system/syscall_unix.go new file mode 100644 index 0000000..49dbdd3 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/syscall_unix.go @@ -0,0 +1,17 @@ +// +build linux freebsd + +package system + +import "golang.org/x/sys/unix" + +// Unmount is a platform-specific helper function to call +// the unmount syscall. +func Unmount(dest string) error { + return unix.Unmount(dest, 0) +} + +// CommandLineToArgv should not be used on Unix. +// It simply returns commandLine in the only element in the returned array. +func CommandLineToArgv(commandLine string) ([]string, error) { + return []string{commandLine}, nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/syscall_windows.go b/vendor/github.com/containers/storage/pkg/system/syscall_windows.go new file mode 100644 index 0000000..23e9b20 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/syscall_windows.go @@ -0,0 +1,122 @@ +package system + +import ( + "unsafe" + + "github.com/sirupsen/logrus" + "golang.org/x/sys/windows" +) + +var ( + ntuserApiset = windows.NewLazyDLL("ext-ms-win-ntuser-window-l1-1-0") + procGetVersionExW = modkernel32.NewProc("GetVersionExW") + procGetProductInfo = modkernel32.NewProc("GetProductInfo") +) + +// OSVersion is a wrapper for Windows version information +// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx +type OSVersion struct { + Version uint32 + MajorVersion uint8 + MinorVersion uint8 + Build uint16 +} + +// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724833(v=vs.85).aspx +type osVersionInfoEx struct { + OSVersionInfoSize uint32 + MajorVersion uint32 + MinorVersion uint32 + BuildNumber uint32 + PlatformID uint32 + CSDVersion [128]uint16 + ServicePackMajor uint16 + ServicePackMinor uint16 + SuiteMask uint16 + ProductType byte + Reserve byte +} + +// GetOSVersion gets the operating system version on Windows. Note that +// docker.exe must be manifested to get the correct version information. +func GetOSVersion() OSVersion { + var err error + osv := OSVersion{} + osv.Version, err = windows.GetVersion() + if err != nil { + // GetVersion never fails. 
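+		// (Editor's note: a non-nil err here would indicate a
+		// programming error rather than a runtime condition, which
+		// is why it panics instead of propagating the error.)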
+ panic(err) + } + osv.MajorVersion = uint8(osv.Version & 0xFF) + osv.MinorVersion = uint8(osv.Version >> 8 & 0xFF) + osv.Build = uint16(osv.Version >> 16) + return osv +} + +// IsWindowsClient returns true if the SKU is client +// @engine maintainers - this function should not be removed or modified as it +// is used to enforce licensing restrictions on Windows. +func IsWindowsClient() bool { + osviex := &osVersionInfoEx{OSVersionInfoSize: 284} + r1, _, err := procGetVersionExW.Call(uintptr(unsafe.Pointer(osviex))) + if r1 == 0 { + logrus.Warnf("GetVersionExW failed - assuming server SKU: %v", err) + return false + } + const verNTWorkstation = 0x00000001 + return osviex.ProductType == verNTWorkstation +} + +// IsIoTCore returns true if the currently running image is based off of +// Windows 10 IoT Core. +// @engine maintainers - this function should not be removed or modified as it +// is used to enforce licensing restrictions on Windows. +func IsIoTCore() bool { + var returnedProductType uint32 + r1, _, err := procGetProductInfo.Call(6, 1, 0, 0, uintptr(unsafe.Pointer(&returnedProductType))) + if r1 == 0 { + logrus.Warnf("GetProductInfo failed - assuming this is not IoT: %v", err) + return false + } + const productIoTUAP = 0x0000007B + const productIoTUAPCommercial = 0x00000083 + return returnedProductType == productIoTUAP || returnedProductType == productIoTUAPCommercial +} + +// Unmount is a platform-specific helper function to call +// the unmount syscall. Not supported on Windows +func Unmount(dest string) error { + return nil +} + +// CommandLineToArgv wraps the Windows syscall to turn a commandline into an argument array. +func CommandLineToArgv(commandLine string) ([]string, error) { + var argc int32 + + argsPtr, err := windows.UTF16PtrFromString(commandLine) + if err != nil { + return nil, err + } + + argv, err := windows.CommandLineToArgv(argsPtr, &argc) + if err != nil { + return nil, err + } + defer windows.LocalFree(windows.Handle(uintptr(unsafe.Pointer(argv)))) + + newArgs := make([]string, argc) + for i, v := range (*argv)[:argc] { + newArgs[i] = string(windows.UTF16ToString((*v)[:])) + } + + return newArgs, nil +} + +// HasWin32KSupport determines whether containers that depend on win32k can +// run on this machine. Win32k is the driver used to implement windowing. +func HasWin32KSupport() bool { + // For now, check for ntuser API support on the host. In the future, a host + // may support win32k in containers even if the host does not support ntuser + // APIs. + return ntuserApiset.Load() == nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/umask.go b/vendor/github.com/containers/storage/pkg/system/umask.go new file mode 100644 index 0000000..5a10eda --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/umask.go @@ -0,0 +1,13 @@ +// +build !windows + +package system + +import ( + "golang.org/x/sys/unix" +) + +// Umask sets current process's file mode creation mask to newmask +// and returns oldmask. +func Umask(newmask int) (oldmask int, err error) { + return unix.Umask(newmask), nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/umask_windows.go b/vendor/github.com/containers/storage/pkg/system/umask_windows.go new file mode 100644 index 0000000..13f1de1 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/umask_windows.go @@ -0,0 +1,9 @@ +// +build windows + +package system + +// Umask is not supported on the windows platform. 
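+// Cross-platform callers are expected to treat ErrNotSupportedPlatform as
+// "skip umask handling" on Windows (editor's note, not part of the
+// vendored source).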
+func Umask(newmask int) (oldmask int, err error) {
+	// should not be called on cli code path
+	return 0, ErrNotSupportedPlatform
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/utimes_freebsd.go b/vendor/github.com/containers/storage/pkg/system/utimes_freebsd.go
new file mode 100644
index 0000000..6a77524
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/utimes_freebsd.go
@@ -0,0 +1,24 @@
+package system
+
+import (
+	"syscall"
+	"unsafe"
+
+	"golang.org/x/sys/unix"
+)
+
+// LUtimesNano is used to change access and modification time of the specified path.
+// It's used for symlink files because unix.UtimesNano doesn't support the NOFOLLOW flag at the moment.
+func LUtimesNano(path string, ts []syscall.Timespec) error {
+	var _path *byte
+	_path, err := unix.BytePtrFromString(path)
+	if err != nil {
+		return err
+	}
+
+	if _, _, err := unix.Syscall(unix.SYS_LUTIMES, uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), 0); err != 0 && err != unix.ENOSYS {
+		return err
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/utimes_linux.go b/vendor/github.com/containers/storage/pkg/system/utimes_linux.go
new file mode 100644
index 0000000..edc588a
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/utimes_linux.go
@@ -0,0 +1,25 @@
+package system
+
+import (
+	"syscall"
+	"unsafe"
+
+	"golang.org/x/sys/unix"
+)
+
+// LUtimesNano is used to change access and modification time of the specified path.
+// It's used for symlink files because unix.UtimesNano doesn't support the NOFOLLOW flag at the moment.
+func LUtimesNano(path string, ts []syscall.Timespec) error {
+	atFdCwd := unix.AT_FDCWD
+
+	var _path *byte
+	_path, err := unix.BytePtrFromString(path)
+	if err != nil {
+		return err
+	}
+	if _, _, err := unix.Syscall6(unix.SYS_UTIMENSAT, uintptr(atFdCwd), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), unix.AT_SYMLINK_NOFOLLOW, 0, 0); err != 0 && err != unix.ENOSYS {
+		return err
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/utimes_unsupported.go b/vendor/github.com/containers/storage/pkg/system/utimes_unsupported.go
new file mode 100644
index 0000000..1397145
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/utimes_unsupported.go
@@ -0,0 +1,10 @@
+// +build !linux,!freebsd
+
+package system
+
+import "syscall"
+
+// LUtimesNano is only supported on linux and freebsd.
+func LUtimesNano(path string, ts []syscall.Timespec) error {
+	return ErrNotSupportedPlatform
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go b/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go
new file mode 100644
index 0000000..98b111b
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go
@@ -0,0 +1,29 @@
+package system
+
+import "golang.org/x/sys/unix"
+
+// Lgetxattr retrieves the value of the extended attribute identified by attr
+// and associated with the given path in the file system.
+// It will return a nil slice and nil error if the xattr is not set.
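+// For illustration (editor's note, not part of the vendored source):
+//
+//	val, err := system.Lgetxattr("/path/to/file", "user.comment")
+//	if err == nil && val == nil {
+//		// the attribute is simply not set
+//	}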
+func Lgetxattr(path string, attr string) ([]byte, error) { + dest := make([]byte, 128) + sz, errno := unix.Lgetxattr(path, attr, dest) + if errno == unix.ENODATA { + return nil, nil + } + if errno == unix.ERANGE { + dest = make([]byte, sz) + sz, errno = unix.Lgetxattr(path, attr, dest) + } + if errno != nil { + return nil, errno + } + + return dest[:sz], nil +} + +// Lsetxattr sets the value of the extended attribute identified by attr +// and associated with the given path in the file system. +func Lsetxattr(path string, attr string, data []byte, flags int) error { + return unix.Lsetxattr(path, attr, data, flags) +} diff --git a/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go b/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go new file mode 100644 index 0000000..0114f22 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go @@ -0,0 +1,13 @@ +// +build !linux + +package system + +// Lgetxattr is not supported on platforms other than linux. +func Lgetxattr(path string, attr string) ([]byte, error) { + return nil, ErrNotSupportedPlatform +} + +// Lsetxattr is not supported on platforms other than linux. +func Lsetxattr(path string, attr string, data []byte, flags int) error { + return ErrNotSupportedPlatform +} diff --git a/vendor/github.com/containers/storage/pkg/truncindex/truncindex.go b/vendor/github.com/containers/storage/pkg/truncindex/truncindex.go new file mode 100644 index 0000000..74776e6 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/truncindex/truncindex.go @@ -0,0 +1,139 @@ +// Package truncindex provides a general 'index tree', used by Docker +// in order to be able to reference containers by only a few unambiguous +// characters of their id. +package truncindex + +import ( + "errors" + "fmt" + "strings" + "sync" + + "github.com/tchap/go-patricia/patricia" +) + +var ( + // ErrEmptyPrefix is an error returned if the prefix was empty. + ErrEmptyPrefix = errors.New("Prefix can't be empty") + + // ErrIllegalChar is returned when a space is in the ID + ErrIllegalChar = errors.New("illegal character: ' '") + + // ErrNotExist is returned when ID or its prefix not found in index. + ErrNotExist = errors.New("ID does not exist") +) + +// ErrAmbiguousPrefix is returned if the prefix was ambiguous +// (multiple ids for the prefix). +type ErrAmbiguousPrefix struct { + prefix string +} + +func (e ErrAmbiguousPrefix) Error() string { + return fmt.Sprintf("Multiple IDs found with provided prefix: %s", e.prefix) +} + +// TruncIndex allows the retrieval of string identifiers by any of their unique prefixes. +// This is used to retrieve image and container IDs by more convenient shorthand prefixes. +type TruncIndex struct { + sync.RWMutex + trie *patricia.Trie + ids map[string]struct{} +} + +// NewTruncIndex creates a new TruncIndex and initializes with a list of IDs. 
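+// For illustration (editor's note, not part of the vendored source):
+//
+//	idx := truncindex.NewTruncIndex(nil)
+//	_ = idx.Add("24bbe1716f3f9db0a8bc5094b43b6f0b973da6b7e241c35ced9e5f38ba64dcba")
+//	id, err := idx.Get("24b") // succeeds while the prefix is unambiguous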
+func NewTruncIndex(ids []string) (idx *TruncIndex) {
+	idx = &TruncIndex{
+		ids: make(map[string]struct{}),
+
+		// Raise patricia's max prefix length per node to 64,
+		// because our IDs are always 64 characters long.
+		trie: patricia.NewTrie(patricia.MaxPrefixPerNode(64)),
+	}
+	for _, id := range ids {
+		idx.addID(id)
+	}
+	return
+}
+
+func (idx *TruncIndex) addID(id string) error {
+	if strings.Contains(id, " ") {
+		return ErrIllegalChar
+	}
+	if id == "" {
+		return ErrEmptyPrefix
+	}
+	if _, exists := idx.ids[id]; exists {
+		return fmt.Errorf("id already exists: '%s'", id)
+	}
+	idx.ids[id] = struct{}{}
+	if inserted := idx.trie.Insert(patricia.Prefix(id), struct{}{}); !inserted {
+		return fmt.Errorf("failed to insert id: %s", id)
+	}
+	return nil
+}
+
+// Add adds a new ID to the TruncIndex.
+func (idx *TruncIndex) Add(id string) error {
+	idx.Lock()
+	defer idx.Unlock()
+	return idx.addID(id)
+}
+
+// Delete removes an ID from the TruncIndex. The full ID must be given;
+// an error is returned if it is not present in the index.
+func (idx *TruncIndex) Delete(id string) error {
+	idx.Lock()
+	defer idx.Unlock()
+	if _, exists := idx.ids[id]; !exists || id == "" {
+		return fmt.Errorf("no such id: '%s'", id)
+	}
+	delete(idx.ids, id)
+	if deleted := idx.trie.Delete(patricia.Prefix(id)); !deleted {
+		return fmt.Errorf("no such id: '%s'", id)
+	}
+	return nil
+}
+
+// Get retrieves an ID from the TruncIndex. If there are multiple IDs
+// with the given prefix, an error is returned.
+func (idx *TruncIndex) Get(s string) (string, error) {
+	if s == "" {
+		return "", ErrEmptyPrefix
+	}
+	var id string
+	subTreeVisitFunc := func(prefix patricia.Prefix, item patricia.Item) error {
+		if id != "" {
+			// the prefix is ambiguous if it matches two or more IDs
+			id = ""
+			return ErrAmbiguousPrefix{prefix: string(prefix)}
+		}
+		id = string(prefix)
+		return nil
+	}
+
+	idx.RLock()
+	defer idx.RUnlock()
+	if err := idx.trie.VisitSubtree(patricia.Prefix(s), subTreeVisitFunc); err != nil {
+		return "", err
+	}
+	if id != "" {
+		return id, nil
+	}
+	return "", ErrNotExist
+}
+
+// Iterate iterates over all stored IDs and passes each of them to the given
+// handler. Take care that the handler method does not call any public
+// method on truncindex as the internal locking is not reentrant/recursive
+// and will result in deadlock.
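+// For illustration (editor's note, not part of the vendored source), a safe
+// handler only collects or inspects IDs:
+//
+//	var all []string
+//	idx.Iterate(func(id string) { all = append(all, id) })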
+func (idx *TruncIndex) Iterate(handler func(id string)) { + idx.Lock() + defer idx.Unlock() + idx.trie.Visit(func(prefix patricia.Prefix, item patricia.Item) error { + handler(string(prefix)) + return nil + }) +} diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go new file mode 100644 index 0000000..d70848d --- /dev/null +++ b/vendor/github.com/containers/storage/store.go @@ -0,0 +1,3425 @@ +package storage + +import ( + "encoding/base64" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "strings" + "sync" + "time" + + // register all of the built-in drivers + _ "github.com/containers/storage/drivers/register" + + "github.com/BurntSushi/toml" + drivers "github.com/containers/storage/drivers" + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/config" + "github.com/containers/storage/pkg/directory" + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/ioutils" + "github.com/containers/storage/pkg/parsers" + "github.com/containers/storage/pkg/stringid" + "github.com/containers/storage/pkg/stringutils" + digest "github.com/opencontainers/go-digest" + "github.com/opencontainers/selinux/go-selinux/label" + "github.com/pkg/errors" +) + +var ( + // DefaultStoreOptions is a reasonable default set of options. + defaultStoreOptions StoreOptions + stores []*store + storesLock sync.Mutex +) + +// ROFileBasedStore wraps up the methods of the various types of file-based +// data stores that we implement which are needed for both read-only and +// read-write files. +type ROFileBasedStore interface { + Locker + + // Load reloads the contents of the store from disk. It should be called + // with the lock held. + Load() error +} + +// RWFileBasedStore wraps up the methods of various types of file-based data +// stores that we implement using read-write files. +type RWFileBasedStore interface { + // Save saves the contents of the store to disk. It should be called with + // the lock held, and Touch() should be called afterward before releasing the + // lock. + Save() error +} + +// FileBasedStore wraps up the common methods of various types of file-based +// data stores that we implement. +type FileBasedStore interface { + ROFileBasedStore + RWFileBasedStore +} + +// ROMetadataStore wraps a method for reading metadata associated with an ID. +type ROMetadataStore interface { + // Metadata reads metadata associated with an item with the specified ID. + Metadata(id string) (string, error) +} + +// RWMetadataStore wraps a method for setting metadata associated with an ID. +type RWMetadataStore interface { + // SetMetadata updates the metadata associated with the item with the specified ID. + SetMetadata(id, metadata string) error +} + +// MetadataStore wraps up methods for getting and setting metadata associated with IDs. +type MetadataStore interface { + ROMetadataStore + RWMetadataStore +} + +// An ROBigDataStore wraps up the read-only big-data related methods of the +// various types of file-based lookaside stores that we implement. +type ROBigDataStore interface { + // BigData retrieves a (potentially large) piece of data associated with + // this ID, if it has previously been set. + BigData(id, key string) ([]byte, error) + + // BigDataSize retrieves the size of a (potentially large) piece of + // data associated with this ID, if it has previously been set. 
+ BigDataSize(id, key string) (int64, error) + + // BigDataDigest retrieves the digest of a (potentially large) piece of + // data associated with this ID, if it has previously been set. + BigDataDigest(id, key string) (digest.Digest, error) + + // BigDataNames() returns a list of the names of previously-stored pieces of + // data. + BigDataNames(id string) ([]string, error) +} + +// A RWImageBigDataStore wraps up how we store big-data associated with images. +type RWImageBigDataStore interface { + // SetBigData stores a (potentially large) piece of data associated + // with this ID. + // Pass github.com/containers/image/manifest.Digest as digestManifest + // to allow ByDigest to find images by their correct digests. + SetBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error +} + +// A ContainerBigDataStore wraps up how we store big-data associated with containers. +type ContainerBigDataStore interface { + ROBigDataStore + // SetBigData stores a (potentially large) piece of data associated + // with this ID. + SetBigData(id, key string, data []byte) error +} + +// A FlaggableStore can have flags set and cleared on items which it manages. +type FlaggableStore interface { + // ClearFlag removes a named flag from an item in the store. + ClearFlag(id string, flag string) error + + // SetFlag sets a named flag and its value on an item in the store. + SetFlag(id string, flag string, value interface{}) error +} + +// StoreOptions is used for passing initialization options to GetStore(), for +// initializing a Store object and the underlying storage that it controls. +type StoreOptions struct { + // RunRoot is the filesystem path under which we can store run-time + // information, such as the locations of active mount points, that we + // want to lose if the host is rebooted. + RunRoot string `json:"runroot,omitempty"` + // GraphRoot is the filesystem path under which we will store the + // contents of layers, images, and containers. + GraphRoot string `json:"root,omitempty"` + // GraphDriverName is the underlying storage driver that we'll be + // using. It only needs to be specified the first time a Store is + // initialized for a given RunRoot and GraphRoot. + GraphDriverName string `json:"driver,omitempty"` + // GraphDriverOptions are driver-specific options. + GraphDriverOptions []string `json:"driver-options,omitempty"` + // UIDMap and GIDMap are used for setting up a container's root filesystem + // for use inside of a user namespace where UID mapping is being used. + UIDMap []idtools.IDMap `json:"uidmap,omitempty"` + GIDMap []idtools.IDMap `json:"gidmap,omitempty"` +} + +// Store wraps up the various types of file-based stores that we use into a +// singleton object that initializes and manages them all together. +type Store interface { + // RunRoot, GraphRoot, GraphDriverName, and GraphOptions retrieve + // settings that were passed to GetStore() when the object was created. + RunRoot() string + GraphRoot() string + GraphDriverName() string + GraphOptions() []string + UIDMap() []idtools.IDMap + GIDMap() []idtools.IDMap + + // GraphDriver obtains and returns a handle to the graph Driver object used + // by the Store. + GraphDriver() (drivers.Driver, error) + + // CreateLayer creates a new layer in the underlying storage driver, + // optionally having the specified ID (one will be assigned if none is + // specified), with the specified layer (or no layer) as its parent, + // and with optional names. (The writeable flag is ignored.) 
+ CreateLayer(id, parent string, names []string, mountLabel string, writeable bool, options *LayerOptions) (*Layer, error) + + // PutLayer combines the functions of CreateLayer and ApplyDiff, + // marking the layer for automatic removal if applying the diff fails + // for any reason. + // + // Note that we do some of this work in a child process. The calling + // process's main() function needs to import our pkg/reexec package and + // should begin with something like this in order to allow us to + // properly start that child process: + // if reexec.Init() { + // return + // } + PutLayer(id, parent string, names []string, mountLabel string, writeable bool, options *LayerOptions, diff io.Reader) (*Layer, int64, error) + + // CreateImage creates a new image, optionally with the specified ID + // (one will be assigned if none is specified), with optional names, + // referring to a specified image, and with optional metadata. An + // image is a record which associates the ID of a layer with a + // additional bookkeeping information which the library stores for the + // convenience of its caller. + CreateImage(id string, names []string, layer, metadata string, options *ImageOptions) (*Image, error) + + // CreateContainer creates a new container, optionally with the + // specified ID (one will be assigned if none is specified), with + // optional names, using the specified image's top layer as the basis + // for the container's layer, and assigning the specified ID to that + // layer (one will be created if none is specified). A container is a + // layer which is associated with additional bookkeeping information + // which the library stores for the convenience of its caller. + CreateContainer(id string, names []string, image, layer, metadata string, options *ContainerOptions) (*Container, error) + + // Metadata retrieves the metadata which is associated with a layer, + // image, or container (whichever the passed-in ID refers to). + Metadata(id string) (string, error) + + // SetMetadata updates the metadata which is associated with a layer, + // image, or container (whichever the passed-in ID refers to) to match + // the specified value. The metadata value can be retrieved at any + // time using Metadata, or using Layer, Image, or Container and reading + // the object directly. + SetMetadata(id, metadata string) error + + // Exists checks if there is a layer, image, or container which has the + // passed-in ID or name. + Exists(id string) bool + + // Status asks for a status report, in the form of key-value pairs, + // from the underlying storage driver. The contents vary from driver + // to driver. + Status() ([][2]string, error) + + // Delete removes the layer, image, or container which has the + // passed-in ID or name. Note that no safety checks are performed, so + // this can leave images with references to layers which do not exist, + // and layers with references to parents which no longer exist. + Delete(id string) error + + // DeleteLayer attempts to remove the specified layer. If the layer is the + // parent of any other layer, or is referred to by any images, it will return + // an error. + DeleteLayer(id string) error + + // DeleteImage removes the specified image if it is not referred to by + // any containers. If its top layer is then no longer referred to by + // any other images and is not the parent of any other layers, its top + // layer will be removed. 
If that layer's parent is no longer referred + // to by any other images and is not the parent of any other layers, + // then it, too, will be removed. This procedure will be repeated + // until a layer which should not be removed, or the base layer, is + // reached, at which point the list of removed layers is returned. If + // the commit argument is false, the image and layers are not removed, + // but the list of layers which would be removed is still returned. + DeleteImage(id string, commit bool) (layers []string, err error) + + // DeleteContainer removes the specified container and its layer. If + // there is no matching container, or if the container exists but its + // layer does not, an error will be returned. + DeleteContainer(id string) error + + // Wipe removes all known layers, images, and containers. + Wipe() error + + // Mount attempts to mount a layer, image, or container for access, and + // returns the pathname if it succeeds. + // Note if the mountLabel == "", the default label for the container + // will be used. + // + // Note that we do some of this work in a child process. The calling + // process's main() function needs to import our pkg/reexec package and + // should begin with something like this in order to allow us to + // properly start that child process: + // if reexec.Init() { + // return + // } + Mount(id, mountLabel string) (string, error) + + // Unmount attempts to unmount a layer, image, or container, given an ID, a + // name, or a mount path. Returns whether or not the layer is still mounted. + Unmount(id string, force bool) (bool, error) + + // Mounted returns number of times the layer has been mounted. + Mounted(id string) (int, error) + + // Changes returns a summary of the changes which would need to be made + // to one layer to make its contents the same as a second layer. If + // the first layer is not specified, the second layer's parent is + // assumed. Each Change structure contains a Path relative to the + // layer's root directory, and a Kind which is either ChangeAdd, + // ChangeModify, or ChangeDelete. + Changes(from, to string) ([]archive.Change, error) + + // DiffSize returns a count of the size of the tarstream which would + // specify the changes returned by Changes. + DiffSize(from, to string) (int64, error) + + // Diff returns the tarstream which would specify the changes returned + // by Changes. If options are passed in, they can override default + // behaviors. + Diff(from, to string, options *DiffOptions) (io.ReadCloser, error) + + // ApplyDiff applies a tarstream to a layer. Information about the + // tarstream is cached with the layer. Typically, a layer which is + // populated using a tarstream will be expected to not be modified in + // any other way, either before or after the diff is applied. + // + // Note that we do some of this work in a child process. The calling + // process's main() function needs to import our pkg/reexec package and + // should begin with something like this in order to allow us to + // properly start that child process: + // if reexec.Init() { + // return + // } + ApplyDiff(to string, diff io.Reader) (int64, error) + + // LayersByCompressedDigest returns a slice of the layers with the + // specified compressed digest value recorded for them. + LayersByCompressedDigest(d digest.Digest) ([]Layer, error) + + // LayersByUncompressedDigest returns a slice of the layers with the + // specified uncompressed digest value recorded for them. 
+ LayersByUncompressedDigest(d digest.Digest) ([]Layer, error) + + // LayerSize returns a cached approximation of the layer's size, or -1 + // if we don't have a value on hand. + LayerSize(id string) (int64, error) + + // LayerParentOwners returns the UIDs and GIDs of owners of parents of + // the layer's mountpoint for which the layer's UID and GID maps (if + // any are defined) don't contain corresponding IDs. + LayerParentOwners(id string) ([]int, []int, error) + + // Layers returns a list of the currently known layers. + Layers() ([]Layer, error) + + // Images returns a list of the currently known images. + Images() ([]Image, error) + + // Containers returns a list of the currently known containers. + Containers() ([]Container, error) + + // Names returns the list of names for a layer, image, or container. + Names(id string) ([]string, error) + + // SetNames changes the list of names for a layer, image, or container. + // Duplicate names are removed from the list automatically. + SetNames(id string, names []string) error + + // ListImageBigData retrieves a list of the (possibly large) chunks of + // named data associated with an image. + ListImageBigData(id string) ([]string, error) + + // ImageBigData retrieves a (possibly large) chunk of named data + // associated with an image. + ImageBigData(id, key string) ([]byte, error) + + // ImageBigDataSize retrieves the size of a (possibly large) chunk + // of named data associated with an image. + ImageBigDataSize(id, key string) (int64, error) + + // ImageBigDataDigest retrieves the digest of a (possibly large) chunk + // of named data associated with an image. + ImageBigDataDigest(id, key string) (digest.Digest, error) + + // SetImageBigData stores a (possibly large) chunk of named data + // associated with an image. Pass + // github.com/containers/image/manifest.Digest as digestManifest to + // allow ImagesByDigest to find images by their correct digests. + SetImageBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error + + // ImageSize computes the size of the image's layers and ancillary data. + ImageSize(id string) (int64, error) + + // ListContainerBigData retrieves a list of the (possibly large) chunks of + // named data associated with a container. + ListContainerBigData(id string) ([]string, error) + + // ContainerBigData retrieves a (possibly large) chunk of named data + // associated with a container. + ContainerBigData(id, key string) ([]byte, error) + + // ContainerBigDataSize retrieves the size of a (possibly large) + // chunk of named data associated with a container. + ContainerBigDataSize(id, key string) (int64, error) + + // ContainerBigDataDigest retrieves the digest of a (possibly large) + // chunk of named data associated with a container. + ContainerBigDataDigest(id, key string) (digest.Digest, error) + + // SetContainerBigData stores a (possibly large) chunk of named data + // associated with a container. + SetContainerBigData(id, key string, data []byte) error + + // ContainerSize computes the size of the container's layer and ancillary + // data. Warning: this is a potentially expensive operation. + ContainerSize(id string) (int64, error) + + // Layer returns a specific layer. + Layer(id string) (*Layer, error) + + // Image returns a specific image. + Image(id string) (*Image, error) + + // ImagesByTopLayer returns a list of images which reference the specified + // layer as their top layer. 
They will have different IDs and names + // and may have different metadata, big data items, and flags. + ImagesByTopLayer(id string) ([]*Image, error) + + // ImagesByDigest returns a list of images which contain a big data item + // named ImageDigestBigDataKey whose contents have the specified digest. + ImagesByDigest(d digest.Digest) ([]*Image, error) + + // Container returns a specific container. + Container(id string) (*Container, error) + + // ContainerByLayer returns a specific container based on its layer ID or + // name. + ContainerByLayer(id string) (*Container, error) + + // ContainerDirectory returns a path of a directory which the caller + // can use to store data, specific to the container, which the library + // does not directly manage. The directory will be deleted when the + // container is deleted. + ContainerDirectory(id string) (string, error) + + // SetContainerDirectoryFile is a convenience function which stores + // a piece of data in the specified file relative to the container's + // directory. + SetContainerDirectoryFile(id, file string, data []byte) error + + // FromContainerDirectory is a convenience function which reads + // the contents of the specified file relative to the container's + // directory. + FromContainerDirectory(id, file string) ([]byte, error) + + // ContainerRunDirectory returns a path of a directory which the + // caller can use to store data, specific to the container, which the + // library does not directly manage. The directory will be deleted + // when the host system is restarted. + ContainerRunDirectory(id string) (string, error) + + // SetContainerRunDirectoryFile is a convenience function which stores + // a piece of data in the specified file relative to the container's + // run directory. + SetContainerRunDirectoryFile(id, file string, data []byte) error + + // FromContainerRunDirectory is a convenience function which reads + // the contents of the specified file relative to the container's run + // directory. + FromContainerRunDirectory(id, file string) ([]byte, error) + + // ContainerParentOwners returns the UIDs and GIDs of owners of parents + // of the container's layer's mountpoint for which the layer's UID and + // GID maps (if any are defined) don't contain corresponding IDs. + ContainerParentOwners(id string) ([]int, []int, error) + + // Lookup returns the ID of a layer, image, or container with the specified + // name or ID. + Lookup(name string) (string, error) + + // Shutdown attempts to free any kernel resources which are being used + // by the underlying driver. If "force" is true, any mounted (i.e., in + // use) layers are unmounted beforehand. If "force" is not true, then + // layers being in use is considered to be an error condition. A list + // of still-mounted layers is returned along with possible errors. + Shutdown(force bool) (layers []string, err error) + + // Version returns version information, in the form of key-value pairs, from + // the storage package. + Version() ([][2]string, error) + + // GetDigestLock returns digest-specific Locker. + GetDigestLock(digest.Digest) (Locker, error) +} + +// IDMappingOptions are used for specifying how ID mapping should be set up for +// a layer or container. +type IDMappingOptions struct { + // UIDMap and GIDMap are used for setting up a layer's root filesystem + // for use inside of a user namespace where ID mapping is being used. + // If HostUIDMapping/HostGIDMapping is true, no mapping of the + // respective type will be used. 
Otherwise, if UIDMap and/or GIDMap + // contain at least one mapping, one or both will be used. By default, + // if neither of those conditions apply, if the layer has a parent + // layer, the parent layer's mapping will be used, and if it does not + // have a parent layer, the mapping which was passed to the Store + // object when it was initialized will be used. + HostUIDMapping bool + HostGIDMapping bool + UIDMap []idtools.IDMap + GIDMap []idtools.IDMap +} + +// LayerOptions is used for passing options to a Store's CreateLayer() and PutLayer() methods. +type LayerOptions struct { + // IDMappingOptions specifies the type of ID mapping which should be + // used for this layer. If nothing is specified, the layer will + // inherit settings from its parent layer or, if it has no parent + // layer, the Store object. + IDMappingOptions + // TemplateLayer is the ID of a layer whose contents will be used to + // initialize this layer. If set, it should be a child of the layer + // which we want to use as the parent of the new layer. + TemplateLayer string +} + +// ImageOptions is used for passing options to a Store's CreateImage() method. +type ImageOptions struct { + // CreationDate, if not zero, will override the default behavior of marking the image as having been + // created when CreateImage() was called, recording CreationDate instead. + CreationDate time.Time + // Digest is a hard-coded digest value that we can use to look up the image. It is optional. + Digest digest.Digest +} + +// ContainerOptions is used for passing options to a Store's CreateContainer() method. +type ContainerOptions struct { + // IDMappingOptions specifies the type of ID mapping which should be + // used for this container's layer. If nothing is specified, the + // container's layer will inherit settings from the image's top layer + // or, if it is not being created based on an image, the Store object. + IDMappingOptions + LabelOpts []string + Flags map[string]interface{} + MountOpts []string +} + +type store struct { + lastLoaded time.Time + runRoot string + graphLock Locker + graphRoot string + graphDriverName string + graphOptions []string + uidMap []idtools.IDMap + gidMap []idtools.IDMap + graphDriver drivers.Driver + layerStore LayerStore + roLayerStores []ROLayerStore + imageStore ImageStore + roImageStores []ROImageStore + containerStore ContainerStore + digestLockRoot string +} + +// GetStore attempts to find an already-created Store object matching the +// specified location and graph driver, and if it can't, it creates and +// initializes a new Store object, and the underlying storage that it controls. +// +// If StoreOptions `options` haven't been fully populated, then DefaultStoreOptions are used. +// +// These defaults observe environment variables: +// * `STORAGE_DRIVER` for the name of the storage driver to attempt to use +// * `STORAGE_OPTS` for the string of options to pass to the driver +// +// Note that we do some of this work in a child process. 
The calling process's +// main() function needs to import our pkg/reexec package and should begin with +// something like this in order to allow us to properly start that child +// process: +// if reexec.Init() { +// return +// } +func GetStore(options StoreOptions) (Store, error) { + if options.RunRoot == "" && options.GraphRoot == "" && options.GraphDriverName == "" && len(options.GraphDriverOptions) == 0 { + options = defaultStoreOptions + } + + if options.GraphRoot != "" { + dir, err := filepath.Abs(options.GraphRoot) + if err != nil { + return nil, errors.Wrapf(err, "error deriving an absolute path from %q", options.GraphRoot) + } + options.GraphRoot = dir + } + if options.RunRoot != "" { + dir, err := filepath.Abs(options.RunRoot) + if err != nil { + return nil, errors.Wrapf(err, "error deriving an absolute path from %q", options.RunRoot) + } + options.RunRoot = dir + } + + storesLock.Lock() + defer storesLock.Unlock() + + for _, s := range stores { + if s.graphRoot == options.GraphRoot && (options.GraphDriverName == "" || s.graphDriverName == options.GraphDriverName) { + return s, nil + } + } + + if options.GraphRoot == "" { + return nil, errors.Wrap(ErrIncompleteOptions, "no storage root specified") + } + if options.RunRoot == "" { + return nil, errors.Wrap(ErrIncompleteOptions, "no storage runroot specified") + } + + if err := os.MkdirAll(options.RunRoot, 0700); err != nil && !os.IsExist(err) { + return nil, err + } + if err := os.MkdirAll(options.GraphRoot, 0700); err != nil && !os.IsExist(err) { + return nil, err + } + for _, subdir := range []string{"mounts", "tmp", options.GraphDriverName} { + if err := os.MkdirAll(filepath.Join(options.GraphRoot, subdir), 0700); err != nil && !os.IsExist(err) { + return nil, err + } + } + + graphLock, err := GetLockfile(filepath.Join(options.GraphRoot, "storage.lock")) + if err != nil { + return nil, err + } + s := &store{ + runRoot: options.RunRoot, + graphLock: graphLock, + graphRoot: options.GraphRoot, + graphDriverName: options.GraphDriverName, + graphOptions: options.GraphDriverOptions, + uidMap: copyIDMap(options.UIDMap), + gidMap: copyIDMap(options.GIDMap), + } + if err := s.load(); err != nil { + return nil, err + } + + stores = append(stores, s) + + return s, nil +} + +func copyIDMap(idmap []idtools.IDMap) []idtools.IDMap { + m := []idtools.IDMap{} + if idmap != nil { + m = make([]idtools.IDMap, len(idmap)) + copy(m, idmap) + } + if len(m) > 0 { + return m[:] + } + return nil +} + +func (s *store) RunRoot() string { + return s.runRoot +} + +func (s *store) GraphDriverName() string { + return s.graphDriverName +} + +func (s *store) GraphRoot() string { + return s.graphRoot +} + +func (s *store) GraphOptions() []string { + return s.graphOptions +} + +func (s *store) UIDMap() []idtools.IDMap { + return copyIDMap(s.uidMap) +} + +func (s *store) GIDMap() []idtools.IDMap { + return copyIDMap(s.gidMap) +} + +func (s *store) load() error { + driver, err := s.GraphDriver() + if err != nil { + return err + } + s.graphDriver = driver + s.graphDriverName = driver.String() + driverPrefix := s.graphDriverName + "-" + + rls, err := s.LayerStore() + if err != nil { + return err + } + s.layerStore = rls + if _, err := s.ROLayerStores(); err != nil { + return err + } + + gipath := filepath.Join(s.graphRoot, driverPrefix+"images") + if err := os.MkdirAll(gipath, 0700); err != nil { + return err + } + ris, err := newImageStore(gipath) + if err != nil { + return err + } + s.imageStore = ris + if _, err := s.ROImageStores(); err != nil { + return err + } 
+ + gcpath := filepath.Join(s.graphRoot, driverPrefix+"containers") + if err := os.MkdirAll(gcpath, 0700); err != nil { + return err + } + rcs, err := newContainerStore(gcpath) + if err != nil { + return err + } + rcpath := filepath.Join(s.runRoot, driverPrefix+"containers") + if err := os.MkdirAll(rcpath, 0700); err != nil { + return err + } + s.containerStore = rcs + + s.digestLockRoot = filepath.Join(s.runRoot, driverPrefix+"locks") + if err := os.MkdirAll(s.digestLockRoot, 0700); err != nil { + return err + } + + return nil +} + +// GetDigestLock returns a digest-specific Locker. +func (s *store) GetDigestLock(d digest.Digest) (Locker, error) { + return GetLockfile(filepath.Join(s.digestLockRoot, d.String())) +} + +func (s *store) getGraphDriver() (drivers.Driver, error) { + if s.graphDriver != nil { + return s.graphDriver, nil + } + config := drivers.Options{ + Root: s.graphRoot, + DriverOptions: s.graphOptions, + UIDMaps: s.uidMap, + GIDMaps: s.gidMap, + } + driver, err := drivers.New(s.graphDriverName, config) + if err != nil { + return nil, err + } + s.graphDriver = driver + s.graphDriverName = driver.String() + return driver, nil +} + +func (s *store) GraphDriver() (drivers.Driver, error) { + s.graphLock.Lock() + defer s.graphLock.Unlock() + if s.graphLock.TouchedSince(s.lastLoaded) { + s.graphDriver = nil + s.layerStore = nil + s.lastLoaded = time.Now() + } + return s.getGraphDriver() +} + +// LayerStore obtains and returns a handle to the writeable layer store object +// used by the Store. Accessing this store directly will bypass locking and +// synchronization, so it is not a part of the exported Store interface. +func (s *store) LayerStore() (LayerStore, error) { + s.graphLock.Lock() + defer s.graphLock.Unlock() + if s.graphLock.TouchedSince(s.lastLoaded) { + s.graphDriver = nil + s.layerStore = nil + s.lastLoaded = time.Now() + } + if s.layerStore != nil { + return s.layerStore, nil + } + driver, err := s.getGraphDriver() + if err != nil { + return nil, err + } + driverPrefix := s.graphDriverName + "-" + rlpath := filepath.Join(s.runRoot, driverPrefix+"layers") + if err := os.MkdirAll(rlpath, 0700); err != nil { + return nil, err + } + glpath := filepath.Join(s.graphRoot, driverPrefix+"layers") + if err := os.MkdirAll(glpath, 0700); err != nil { + return nil, err + } + rls, err := newLayerStore(rlpath, glpath, driver, s.uidMap, s.gidMap) + if err != nil { + return nil, err + } + s.layerStore = rls + return s.layerStore, nil +} + +// ROLayerStores obtains additional read/only layer store objects used by the +// Store. Accessing these stores directly will bypass locking and +// synchronization, so it is not part of the exported Store interface. +func (s *store) ROLayerStores() ([]ROLayerStore, error) { + s.graphLock.Lock() + defer s.graphLock.Unlock() + if s.roLayerStores != nil { + return s.roLayerStores, nil + } + driver, err := s.getGraphDriver() + if err != nil { + return nil, err + } + driverPrefix := s.graphDriverName + "-" + rlpath := filepath.Join(s.runRoot, driverPrefix+"layers") + if err := os.MkdirAll(rlpath, 0700); err != nil { + return nil, err + } + for _, store := range driver.AdditionalImageStores() { + glpath := filepath.Join(store, driverPrefix+"layers") + rls, err := newROLayerStore(rlpath, glpath, driver) + if err != nil { + return nil, err + } + s.roLayerStores = append(s.roLayerStores, rls) + } + return s.roLayerStores, nil +} + +// ImageStore obtains and returns a handle to the writable image store object +// used by the Store. 
Accessing this store directly will bypass locking and +// synchronization, so it is not a part of the exported Store interface. +func (s *store) ImageStore() (ImageStore, error) { + if s.imageStore != nil { + return s.imageStore, nil + } + return nil, ErrLoadError +} + +// ROImageStores obtains additional read/only image store objects used by the +// Store. Accessing these stores directly will bypass locking and +// synchronization, so it is not a part of the exported Store interface. +func (s *store) ROImageStores() ([]ROImageStore, error) { + if len(s.roImageStores) != 0 { + return s.roImageStores, nil + } + driver, err := s.getGraphDriver() + if err != nil { + return nil, err + } + driverPrefix := s.graphDriverName + "-" + for _, store := range driver.AdditionalImageStores() { + gipath := filepath.Join(store, driverPrefix+"images") + ris, err := newROImageStore(gipath) + if err != nil { + return nil, err + } + s.roImageStores = append(s.roImageStores, ris) + } + return s.roImageStores, nil +} + +// ContainerStore obtains and returns a handle to the container store object +// used by the Store. Accessing this store directly will bypass locking and +// synchronization, so it is not a part of the exported Store interface. +func (s *store) ContainerStore() (ContainerStore, error) { + if s.containerStore != nil { + return s.containerStore, nil + } + return nil, ErrLoadError +} + +func (s *store) PutLayer(id, parent string, names []string, mountLabel string, writeable bool, options *LayerOptions, diff io.Reader) (*Layer, int64, error) { + var parentLayer *Layer + rlstore, err := s.LayerStore() + if err != nil { + return nil, -1, err + } + rlstores, err := s.ROLayerStores() + if err != nil { + return nil, -1, err + } + rcstore, err := s.ContainerStore() + if err != nil { + return nil, -1, err + } + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + if err = rlstore.Load(); err != nil { + return nil, -1, err + } + } + rcstore.Lock() + defer rcstore.Unlock() + if modified, err := rcstore.Modified(); modified || err != nil { + if err = rcstore.Load(); err != nil { + return nil, -1, err + } + } + if id == "" { + id = stringid.GenerateRandomID() + } + if options == nil { + options = &LayerOptions{} + } + if options.HostUIDMapping { + options.UIDMap = nil + } + if options.HostGIDMapping { + options.GIDMap = nil + } + uidMap := options.UIDMap + gidMap := options.GIDMap + if parent != "" { + var ilayer *Layer + for _, l := range append([]ROLayerStore{rlstore}, rlstores...) 
{
+			lstore := l
+			if lstore != rlstore {
+				lstore.Lock()
+				defer lstore.Unlock()
+				if modified, err := lstore.Modified(); modified || err != nil {
+					if err = lstore.Load(); err != nil {
+						return nil, -1, err
+					}
+				}
+			}
+			if l, err := lstore.Get(parent); err == nil && l != nil {
+				ilayer = l
+				parent = ilayer.ID
+				break
+			}
+		}
+		if ilayer == nil {
+			return nil, -1, ErrLayerUnknown
+		}
+		parentLayer = ilayer
+		containers, err := rcstore.Containers()
+		if err != nil {
+			return nil, -1, err
+		}
+		for _, container := range containers {
+			if container.LayerID == parent {
+				return nil, -1, ErrParentIsContainer
+			}
+		}
+		if !options.HostUIDMapping && len(options.UIDMap) == 0 {
+			uidMap = ilayer.UIDMap
+		}
+		if !options.HostGIDMapping && len(options.GIDMap) == 0 {
+			gidMap = ilayer.GIDMap
+		}
+	} else {
+		if !options.HostUIDMapping && len(options.UIDMap) == 0 {
+			uidMap = s.uidMap
+		}
+		if !options.HostGIDMapping && len(options.GIDMap) == 0 {
+			gidMap = s.gidMap
+		}
+	}
+	var layerOptions *LayerOptions
+	if s.graphDriver.SupportsShifting() {
+		layerOptions = &LayerOptions{IDMappingOptions: IDMappingOptions{HostUIDMapping: true, HostGIDMapping: true, UIDMap: nil, GIDMap: nil}}
+	} else {
+		layerOptions = &LayerOptions{
+			IDMappingOptions: IDMappingOptions{
+				HostUIDMapping: options.HostUIDMapping,
+				HostGIDMapping: options.HostGIDMapping,
+				UIDMap:         copyIDMap(uidMap),
+				GIDMap:         copyIDMap(gidMap),
+			},
+		}
+	}
+	return rlstore.Put(id, parentLayer, names, mountLabel, nil, layerOptions, writeable, nil, diff)
+}
+
+func (s *store) CreateLayer(id, parent string, names []string, mountLabel string, writeable bool, options *LayerOptions) (*Layer, error) {
+	layer, _, err := s.PutLayer(id, parent, names, mountLabel, writeable, options, nil)
+	return layer, err
+}
+
+func (s *store) CreateImage(id string, names []string, layer, metadata string, options *ImageOptions) (*Image, error) {
+	// Treat a nil options the same as empty options, so that its fields can
+	// be read safely below.
+	if options == nil {
+		options = &ImageOptions{}
+	}
+	if id == "" {
+		id = stringid.GenerateRandomID()
+	}
+
+	if layer != "" {
+		lstore, err := s.LayerStore()
+		if err != nil {
+			return nil, err
+		}
+		lstores, err := s.ROLayerStores()
+		if err != nil {
+			return nil, err
+		}
+		var ilayer *Layer
+		for _, s := range append([]ROLayerStore{lstore}, lstores...) {
+			store := s
+			store.Lock()
+			defer store.Unlock()
+			if modified, err := store.Modified(); modified || err != nil {
+				if err = store.Load(); err != nil {
+					return nil, err
+				}
+			}
+			ilayer, err = store.Get(layer)
+			if err == nil {
+				break
+			}
+		}
+		if ilayer == nil {
+			return nil, ErrLayerUnknown
+		}
+		layer = ilayer.ID
+	}
+
+	ristore, err := s.ImageStore()
+	if err != nil {
+		return nil, err
+	}
+	ristore.Lock()
+	defer ristore.Unlock()
+	if modified, err := ristore.Modified(); modified || err != nil {
+		if err = ristore.Load(); err != nil {
+			return nil, err
+		}
+	}
+
+	creationDate := time.Now().UTC()
+	if !options.CreationDate.IsZero() {
+		creationDate = options.CreationDate
+	}
+
+	return ristore.Create(id, names, layer, metadata, creationDate, options.Digest)
+}
+
+func (s *store) imageTopLayerForMapping(image *Image, ristore ROImageStore, createMappedLayer bool, rlstore LayerStore, lstores []ROLayerStore, options IDMappingOptions) (*Layer, error) {
+	layerMatchesMappingOptions := func(layer *Layer, options IDMappingOptions) bool {
+		// If the driver supports shifting and the layer has no mappings, we can use it.
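+		// (A driver that can shift IDs remaps ownership at mount time, so an
+		// unmapped layer can satisfy any requested mapping.)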
+ if s.graphDriver.SupportsShifting() && len(layer.UIDMap) == 0 && len(layer.GIDMap) == 0 { + return true + } + // If we want host mapping, and the layer uses mappings, it's not the best match. + if options.HostUIDMapping && len(layer.UIDMap) != 0 { + return false + } + if options.HostGIDMapping && len(layer.GIDMap) != 0 { + return false + } + // If we don't care about the mapping, it's fine. + if len(options.UIDMap) == 0 && len(options.GIDMap) == 0 { + return true + } + // Compare the maps. + return reflect.DeepEqual(layer.UIDMap, options.UIDMap) && reflect.DeepEqual(layer.GIDMap, options.GIDMap) + } + var layer, parentLayer *Layer + allStores := append([]ROLayerStore{rlstore}, lstores...) + // Locate the image's top layer and its parent, if it has one. + for _, s := range allStores { + store := s + if store != rlstore { + store.Lock() + defer store.Unlock() + if modified, err := store.Modified(); modified || err != nil { + if err = store.Load(); err != nil { + return nil, err + } + } + } + // Walk the top layer list. + for _, candidate := range append([]string{image.TopLayer}, image.MappedTopLayers...) { + if cLayer, err := store.Get(candidate); err == nil { + // We want the layer's parent, too, if it has one. + var cParentLayer *Layer + if cLayer.Parent != "" { + // Its parent should be in one of the stores, somewhere. + for _, ps := range allStores { + if cParentLayer, err = ps.Get(cLayer.Parent); err == nil { + break + } + } + if cParentLayer == nil { + continue + } + } + // If the layer matches the desired mappings, it's a perfect match, + // so we're actually done here. + if layerMatchesMappingOptions(cLayer, options) { + return cLayer, nil + } + // Record the first one that we found, even if it's not ideal, so that + // we have a starting point. + if layer == nil { + layer = cLayer + parentLayer = cParentLayer + } + } + } + } + if layer == nil { + return nil, ErrLayerUnknown + } + // The top layer's mappings don't match the ones we want, but it's in a read-only + // image store, so we can't create and add a mapped copy of the layer to the image. + // We'll have to do the mapping for the container itself, elsewhere. + if !createMappedLayer { + return layer, nil + } + // The top layer's mappings don't match the ones we want, and it's in an image store + // that lets us edit image metadata... + if istore, ok := ristore.(*imageStore); ok { + // ... so create a duplicate of the layer with the desired mappings, and + // register it as an alternate top layer in the image. 
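+		// The copy is cloned from the existing layer via TemplateLayer, is
+		// given the requested UID/GID mappings (or host mappings when the
+		// driver can shift IDs), and is then recorded with addMappedTopLayer;
+		// if recording fails, the cloned layer is deleted again.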
+ var layerOptions LayerOptions + if s.graphDriver.SupportsShifting() { + layerOptions = LayerOptions{ + IDMappingOptions: IDMappingOptions{ + HostUIDMapping: true, + HostGIDMapping: true, + UIDMap: nil, + GIDMap: nil, + }, + } + } else { + layerOptions = LayerOptions{ + IDMappingOptions: IDMappingOptions{ + HostUIDMapping: options.HostUIDMapping, + HostGIDMapping: options.HostGIDMapping, + UIDMap: copyIDMap(options.UIDMap), + GIDMap: copyIDMap(options.GIDMap), + }, + } + } + layerOptions.TemplateLayer = layer.ID + mappedLayer, _, err := rlstore.Put("", parentLayer, nil, layer.MountLabel, nil, &layerOptions, false, nil, nil) + if err != nil { + return nil, errors.Wrapf(err, "error creating an ID-mapped copy of layer %q", layer.ID) + } + if err = istore.addMappedTopLayer(image.ID, mappedLayer.ID); err != nil { + if err2 := rlstore.Delete(mappedLayer.ID); err2 != nil { + err = errors.WithMessage(err, fmt.Sprintf("error deleting layer %q: %v", mappedLayer.ID, err2)) + } + return nil, errors.Wrapf(err, "error registering ID-mapped layer with image %q", image.ID) + } + layer = mappedLayer + } + return layer, nil +} + +func (s *store) CreateContainer(id string, names []string, image, layer, metadata string, options *ContainerOptions) (*Container, error) { + if options == nil { + options = &ContainerOptions{} + } + if options.HostUIDMapping { + options.UIDMap = nil + } + if options.HostGIDMapping { + options.GIDMap = nil + } + rlstore, err := s.LayerStore() + if err != nil { + return nil, err + } + if id == "" { + id = stringid.GenerateRandomID() + } + + var imageTopLayer *Layer + imageID := "" + uidMap := options.UIDMap + gidMap := options.GIDMap + + idMappingsOptions := options.IDMappingOptions + if image != "" { + var imageHomeStore ROImageStore + lstores, err := s.ROLayerStores() + if err != nil { + return nil, err + } + istore, err := s.ImageStore() + if err != nil { + return nil, err + } + istores, err := s.ROImageStores() + if err != nil { + return nil, err + } + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + if err = rlstore.Load(); err != nil { + return nil, err + } + } + var cimage *Image + for _, s := range append([]ROImageStore{istore}, istores...) 
{ + store := s + store.Lock() + defer store.Unlock() + if modified, err := store.Modified(); modified || err != nil { + if err = store.Load(); err != nil { + return nil, err + } + } + cimage, err = store.Get(image) + if err == nil { + imageHomeStore = store + break + } + } + if cimage == nil { + return nil, ErrImageUnknown + } + imageID = cimage.ID + + if cimage.TopLayer != "" { + createMappedLayer := imageHomeStore == istore + ilayer, err := s.imageTopLayerForMapping(cimage, imageHomeStore, createMappedLayer, rlstore, lstores, idMappingsOptions) + if err != nil { + return nil, err + } + imageTopLayer = ilayer + + if !options.HostUIDMapping && len(options.UIDMap) == 0 { + uidMap = ilayer.UIDMap + } + if !options.HostGIDMapping && len(options.GIDMap) == 0 { + gidMap = ilayer.GIDMap + } + } + } else { + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + if err = rlstore.Load(); err != nil { + return nil, err + } + } + if !options.HostUIDMapping && len(options.UIDMap) == 0 { + uidMap = s.uidMap + } + if !options.HostGIDMapping && len(options.GIDMap) == 0 { + gidMap = s.gidMap + } + } + var layerOptions *LayerOptions + if s.graphDriver.SupportsShifting() { + layerOptions = &LayerOptions{ + IDMappingOptions: IDMappingOptions{ + HostUIDMapping: true, + HostGIDMapping: true, + UIDMap: nil, + GIDMap: nil, + }, + } + } else { + layerOptions = &LayerOptions{ + IDMappingOptions: IDMappingOptions{ + HostUIDMapping: idMappingsOptions.HostUIDMapping, + HostGIDMapping: idMappingsOptions.HostGIDMapping, + UIDMap: copyIDMap(uidMap), + GIDMap: copyIDMap(gidMap), + }, + } + } + if options.Flags == nil { + options.Flags = make(map[string]interface{}) + } + plabel, _ := options.Flags["ProcessLabel"].(string) + mlabel, _ := options.Flags["MountLabel"].(string) + if (plabel == "" && mlabel != "") || + (plabel != "" && mlabel == "") { + return nil, errors.Errorf("ProcessLabel and Mountlabel must either not be specified or both specified") + } + + if plabel == "" { + processLabel, mountLabel, err := label.InitLabels(options.LabelOpts) + if err != nil { + return nil, err + } + options.Flags["ProcessLabel"] = processLabel + options.Flags["MountLabel"] = mountLabel + } + + clayer, err := rlstore.Create(layer, imageTopLayer, nil, options.Flags["MountLabel"].(string), nil, layerOptions, true) + if err != nil { + return nil, err + } + layer = clayer.ID + rcstore, err := s.ContainerStore() + if err != nil { + return nil, err + } + rcstore.Lock() + defer rcstore.Unlock() + if modified, err := rcstore.Modified(); modified || err != nil { + if err = rcstore.Load(); err != nil { + return nil, err + } + } + options.IDMappingOptions = IDMappingOptions{ + HostUIDMapping: len(options.UIDMap) == 0, + HostGIDMapping: len(options.GIDMap) == 0, + UIDMap: copyIDMap(options.UIDMap), + GIDMap: copyIDMap(options.GIDMap), + } + container, err := rcstore.Create(id, names, imageID, layer, metadata, options) + if err != nil || container == nil { + rlstore.Delete(layer) + } + return container, err +} + +func (s *store) SetMetadata(id, metadata string) error { + rlstore, err := s.LayerStore() + if err != nil { + return err + } + ristore, err := s.ImageStore() + if err != nil { + return err + } + rcstore, err := s.ContainerStore() + if err != nil { + return err + } + + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + if err = rlstore.Load(); err != nil { + return err + } + } + ristore.Lock() + defer ristore.Unlock() + if 
modified, err := ristore.Modified(); modified || err != nil { + if err := ristore.Load(); err != nil { + return err + } + } + rcstore.Lock() + defer rcstore.Unlock() + if modified, err := rcstore.Modified(); modified || err != nil { + if err = rcstore.Load(); err != nil { + return err + } + } + + if rlstore.Exists(id) { + return rlstore.SetMetadata(id, metadata) + } + if ristore.Exists(id) { + return ristore.SetMetadata(id, metadata) + } + if rcstore.Exists(id) { + return rcstore.SetMetadata(id, metadata) + } + return ErrNotAnID +} + +func (s *store) Metadata(id string) (string, error) { + lstore, err := s.LayerStore() + if err != nil { + return "", err + } + lstores, err := s.ROLayerStores() + if err != nil { + return "", err + } + for _, s := range append([]ROLayerStore{lstore}, lstores...) { + store := s + store.RLock() + defer store.Unlock() + if modified, err := store.Modified(); modified || err != nil { + if err = store.Load(); err != nil { + return "", err + } + } + if store.Exists(id) { + return store.Metadata(id) + } + } + + istore, err := s.ImageStore() + if err != nil { + return "", err + } + istores, err := s.ROImageStores() + if err != nil { + return "", err + } + for _, s := range append([]ROImageStore{istore}, istores...) { + store := s + store.RLock() + defer store.Unlock() + if modified, err := store.Modified(); modified || err != nil { + if err = store.Load(); err != nil { + return "", err + } + } + if store.Exists(id) { + return store.Metadata(id) + } + } + + cstore, err := s.ContainerStore() + if err != nil { + return "", err + } + cstore.RLock() + defer cstore.Unlock() + if modified, err := cstore.Modified(); modified || err != nil { + if err = cstore.Load(); err != nil { + return "", err + } + } + if cstore.Exists(id) { + return cstore.Metadata(id) + } + return "", ErrNotAnID +} + +func (s *store) ListImageBigData(id string) ([]string, error) { + istore, err := s.ImageStore() + if err != nil { + return nil, err + } + istores, err := s.ROImageStores() + if err != nil { + return nil, err + } + for _, s := range append([]ROImageStore{istore}, istores...) { + store := s + store.RLock() + defer store.Unlock() + if modified, err := store.Modified(); modified || err != nil { + if err = store.Load(); err != nil { + return nil, err + } + } + bigDataNames, err := store.BigDataNames(id) + if err == nil { + return bigDataNames, err + } + } + return nil, ErrImageUnknown +} + +func (s *store) ImageBigDataSize(id, key string) (int64, error) { + istore, err := s.ImageStore() + if err != nil { + return -1, err + } + istores, err := s.ROImageStores() + if err != nil { + return -1, err + } + for _, s := range append([]ROImageStore{istore}, istores...) { + store := s + store.RLock() + defer store.Unlock() + if modified, err := store.Modified(); modified || err != nil { + if err = store.Load(); err != nil { + return -1, err + } + } + size, err := store.BigDataSize(id, key) + if err == nil { + return size, nil + } + } + return -1, ErrSizeUnknown +} + +func (s *store) ImageBigDataDigest(id, key string) (digest.Digest, error) { + ristore, err := s.ImageStore() + if err != nil { + return "", err + } + stores, err := s.ROImageStores() + if err != nil { + return "", err + } + stores = append([]ROImageStore{ristore}, stores...) 
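+	// Try the writable image store first, then the read-only stores; the
+	// first digest that validates wins.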
+	for _, r := range stores {
+		ristore := r
+		ristore.RLock()
+		defer ristore.Unlock()
+		if modified, err := ristore.Modified(); modified || err != nil {
+			if err = ristore.Load(); err != nil {
+				return "", err
+			}
+		}
+		d, err := ristore.BigDataDigest(id, key)
+		if err == nil && d.Validate() == nil {
+			return d, nil
+		}
+	}
+	return "", ErrDigestUnknown
+}
+
+func (s *store) ImageBigData(id, key string) ([]byte, error) {
+	istore, err := s.ImageStore()
+	if err != nil {
+		return nil, err
+	}
+	istores, err := s.ROImageStores()
+	if err != nil {
+		return nil, err
+	}
+	for _, s := range append([]ROImageStore{istore}, istores...) {
+		store := s
+		store.RLock()
+		defer store.Unlock()
+		if modified, err := store.Modified(); modified || err != nil {
+			if err = store.Load(); err != nil {
+				return nil, err
+			}
+		}
+		data, err := store.BigData(id, key)
+		if err == nil {
+			return data, nil
+		}
+	}
+	return nil, ErrImageUnknown
+}
+
+func (s *store) SetImageBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error {
+	ristore, err := s.ImageStore()
+	if err != nil {
+		return err
+	}
+
+	ristore.Lock()
+	defer ristore.Unlock()
+	if modified, err := ristore.Modified(); modified || err != nil {
+		if err = ristore.Load(); err != nil {
+			return err
+		}
+	}
+
+	return ristore.SetBigData(id, key, data, digestManifest)
+}
+
+func (s *store) ImageSize(id string) (int64, error) {
+	var image *Image
+
+	lstore, err := s.LayerStore()
+	if err != nil {
+		return -1, errors.Wrapf(err, "error loading primary layer store data")
+	}
+	lstores, err := s.ROLayerStores()
+	if err != nil {
+		return -1, errors.Wrapf(err, "error loading additional layer stores")
+	}
+	for _, s := range append([]ROLayerStore{lstore}, lstores...) {
+		store := s
+		store.RLock()
+		defer store.Unlock()
+		if modified, err := store.Modified(); modified || err != nil {
+			if err = store.Load(); err != nil {
+				return -1, err
+			}
+		}
+	}
+
+	var imageStore ROBigDataStore
+	istore, err := s.ImageStore()
+	if err != nil {
+		return -1, errors.Wrapf(err, "error loading primary image store data")
+	}
+	istores, err := s.ROImageStores()
+	if err != nil {
+		return -1, errors.Wrapf(err, "error loading additional image stores")
+	}
+
+	// Look for the image's record.
+	for _, s := range append([]ROImageStore{istore}, istores...) {
+		store := s
+		store.RLock()
+		defer store.Unlock()
+		if modified, err := store.Modified(); modified || err != nil {
+			if err = store.Load(); err != nil {
+				return -1, err
+			}
+		}
+		if image, err = store.Get(id); err == nil {
+			imageStore = store
+			break
+		}
+	}
+	if image == nil {
+		return -1, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
+	}
+
+	// Start with a list of the image's top layers.
+	queue := make(map[string]struct{})
+	for _, layerID := range append([]string{image.TopLayer}, image.MappedTopLayers...) {
+		queue[layerID] = struct{}{}
+	}
+	visited := make(map[string]struct{})
+	// Walk all of the layers.
+	var size int64
+	for len(visited) < len(queue) {
+		for layerID := range queue {
+			// Visit each layer only once.
+			if _, ok := visited[layerID]; ok {
+				continue
+			}
+			visited[layerID] = struct{}{}
+			// Look for the layer and the store that knows about it.
+			var layerStore ROLayerStore
+			var layer *Layer
+			for _, store := range append([]ROLayerStore{lstore}, lstores...) 
{ + if layer, err = store.Get(layerID); err == nil { + layerStore = store + break + } + } + if layer == nil { + return -1, errors.Wrapf(ErrLayerUnknown, "error locating layer with ID %q", layerID) + } + // The UncompressedSize is only valid if there's a digest to go with it. + n := layer.UncompressedSize + if layer.UncompressedDigest == "" { + // Compute the size. + n, err = layerStore.DiffSize("", layer.ID) + if err != nil { + return -1, errors.Wrapf(err, "size/digest of layer with ID %q could not be calculated", layerID) + } + } + // Count this layer. + size += n + // Make a note to visit the layer's parent if we haven't already. + if layer.Parent != "" { + queue[layer.Parent] = struct{}{} + } + } + } + + // Count big data items. + names, err := imageStore.BigDataNames(id) + if err != nil { + return -1, errors.Wrapf(err, "error reading list of big data items for image %q", id) + } + for _, name := range names { + n, err := imageStore.BigDataSize(id, name) + if err != nil { + return -1, errors.Wrapf(err, "error reading size of big data item %q for image %q", name, id) + } + size += n + } + + return size, nil +} + +func (s *store) ContainerSize(id string) (int64, error) { + lstore, err := s.LayerStore() + if err != nil { + return -1, err + } + lstores, err := s.ROLayerStores() + if err != nil { + return -1, err + } + for _, s := range append([]ROLayerStore{lstore}, lstores...) { + store := s + store.RLock() + defer store.Unlock() + if modified, err := store.Modified(); modified || err != nil { + if err = store.Load(); err != nil { + return -1, err + } + } + } + + // Get the location of the container directory and container run directory. + // Do it before we lock the container store because they do, too. + cdir, err := s.ContainerDirectory(id) + if err != nil { + return -1, err + } + rdir, err := s.ContainerRunDirectory(id) + if err != nil { + return -1, err + } + + rcstore, err := s.ContainerStore() + if err != nil { + return -1, err + } + rcstore.RLock() + defer rcstore.Unlock() + if modified, err := rcstore.Modified(); modified || err != nil { + if err = rcstore.Load(); err != nil { + return -1, err + } + } + + // Read the container record. + container, err := rcstore.Get(id) + if err != nil { + return -1, err + } + + // Read the container's layer's size. + var layer *Layer + var size int64 + for _, store := range append([]ROLayerStore{lstore}, lstores...) { + if layer, err = store.Get(container.LayerID); err == nil { + size, err = store.DiffSize("", layer.ID) + if err != nil { + return -1, errors.Wrapf(err, "error determining size of layer with ID %q", layer.ID) + } + break + } + } + if layer == nil { + return -1, errors.Wrapf(ErrLayerUnknown, "error locating layer with ID %q", container.LayerID) + } + + // Count big data items. + names, err := rcstore.BigDataNames(id) + if err != nil { + return -1, errors.Wrapf(err, "error reading list of big data items for container %q", container.ID) + } + for _, name := range names { + n, err := rcstore.BigDataSize(id, name) + if err != nil { + return -1, errors.Wrapf(err, "error reading size of big data item %q for container %q", name, id) + } + size += n + } + + // Count the size of our container directory and container run directory. 
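+	// (directory.Size walks the tree rooted at the given path and returns the
+	// total size of the files under it.)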
+ n, err := directory.Size(cdir) + if err != nil { + return -1, err + } + size += n + n, err = directory.Size(rdir) + if err != nil { + return -1, err + } + size += n + + return size, nil +} + +func (s *store) ListContainerBigData(id string) ([]string, error) { + rcstore, err := s.ContainerStore() + if err != nil { + return nil, err + } + + rcstore.RLock() + defer rcstore.Unlock() + if modified, err := rcstore.Modified(); modified || err != nil { + if err = rcstore.Load(); err != nil { + return nil, err + } + } + + return rcstore.BigDataNames(id) +} + +func (s *store) ContainerBigDataSize(id, key string) (int64, error) { + rcstore, err := s.ContainerStore() + if err != nil { + return -1, err + } + rcstore.RLock() + defer rcstore.Unlock() + if modified, err := rcstore.Modified(); modified || err != nil { + if err = rcstore.Load(); err != nil { + return -1, err + } + } + return rcstore.BigDataSize(id, key) +} + +func (s *store) ContainerBigDataDigest(id, key string) (digest.Digest, error) { + rcstore, err := s.ContainerStore() + if err != nil { + return "", err + } + rcstore.RLock() + defer rcstore.Unlock() + if modified, err := rcstore.Modified(); modified || err != nil { + if err = rcstore.Load(); err != nil { + return "", err + } + } + return rcstore.BigDataDigest(id, key) +} + +func (s *store) ContainerBigData(id, key string) ([]byte, error) { + rcstore, err := s.ContainerStore() + if err != nil { + return nil, err + } + rcstore.RLock() + defer rcstore.Unlock() + if modified, err := rcstore.Modified(); modified || err != nil { + if err = rcstore.Load(); err != nil { + return nil, err + } + } + return rcstore.BigData(id, key) +} + +func (s *store) SetContainerBigData(id, key string, data []byte) error { + rcstore, err := s.ContainerStore() + if err != nil { + return err + } + rcstore.Lock() + defer rcstore.Unlock() + if modified, err := rcstore.Modified(); modified || err != nil { + if err = rcstore.Load(); err != nil { + return err + } + } + return rcstore.SetBigData(id, key, data) +} + +func (s *store) Exists(id string) bool { + lstore, err := s.LayerStore() + if err != nil { + return false + } + lstores, err := s.ROLayerStores() + if err != nil { + return false + } + for _, s := range append([]ROLayerStore{lstore}, lstores...) { + store := s + store.RLock() + defer store.Unlock() + if modified, err := store.Modified(); modified || err != nil { + if err = store.Load(); err != nil { + return false + } + } + if store.Exists(id) { + return true + } + } + + istore, err := s.ImageStore() + if err != nil { + return false + } + istores, err := s.ROImageStores() + if err != nil { + return false + } + for _, s := range append([]ROImageStore{istore}, istores...) 
{ + store := s + store.RLock() + defer store.Unlock() + if modified, err := store.Modified(); modified || err != nil { + if err = store.Load(); err != nil { + return false + } + } + if store.Exists(id) { + return true + } + } + + rcstore, err := s.ContainerStore() + if err != nil { + return false + } + rcstore.RLock() + defer rcstore.Unlock() + if modified, err := rcstore.Modified(); modified || err != nil { + if err = rcstore.Load(); err != nil { + return false + } + } + if rcstore.Exists(id) { + return true + } + + return false +} + +func dedupeNames(names []string) []string { + seen := make(map[string]bool) + deduped := make([]string, 0, len(names)) + for _, name := range names { + if _, wasSeen := seen[name]; !wasSeen { + seen[name] = true + deduped = append(deduped, name) + } + } + return deduped +} + +func (s *store) SetNames(id string, names []string) error { + deduped := dedupeNames(names) + + rlstore, err := s.LayerStore() + if err != nil { + return err + } + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + if err = rlstore.Load(); err != nil { + return err + } + } + if rlstore.Exists(id) { + return rlstore.SetNames(id, deduped) + } + + ristore, err := s.ImageStore() + if err != nil { + return err + } + ristore.Lock() + defer ristore.Unlock() + if modified, err := ristore.Modified(); modified || err != nil { + if err = ristore.Load(); err != nil { + return err + } + } + if ristore.Exists(id) { + return ristore.SetNames(id, deduped) + } + + rcstore, err := s.ContainerStore() + if err != nil { + return err + } + rcstore.Lock() + defer rcstore.Unlock() + if modified, err := rcstore.Modified(); modified || err != nil { + if err = rcstore.Load(); err != nil { + return err + } + } + if rcstore.Exists(id) { + return rcstore.SetNames(id, deduped) + } + return ErrLayerUnknown +} + +func (s *store) Names(id string) ([]string, error) { + lstore, err := s.LayerStore() + if err != nil { + return nil, err + } + lstores, err := s.ROLayerStores() + if err != nil { + return nil, err + } + for _, s := range append([]ROLayerStore{lstore}, lstores...) { + store := s + store.RLock() + defer store.Unlock() + if modified, err := store.Modified(); modified || err != nil { + if err = store.Load(); err != nil { + return nil, err + } + } + if l, err := store.Get(id); l != nil && err == nil { + return l.Names, nil + } + } + + istore, err := s.ImageStore() + if err != nil { + return nil, err + } + istores, err := s.ROImageStores() + if err != nil { + return nil, err + } + for _, s := range append([]ROImageStore{istore}, istores...) { + store := s + store.RLock() + defer store.Unlock() + if modified, err := store.Modified(); modified || err != nil { + if err = store.Load(); err != nil { + return nil, err + } + } + if i, err := store.Get(id); i != nil && err == nil { + return i.Names, nil + } + } + + rcstore, err := s.ContainerStore() + if err != nil { + return nil, err + } + rcstore.RLock() + defer rcstore.Unlock() + if modified, err := rcstore.Modified(); modified || err != nil { + if err = rcstore.Load(); err != nil { + return nil, err + } + } + if c, err := rcstore.Get(id); c != nil && err == nil { + return c.Names, nil + } + return nil, ErrLayerUnknown +} + +func (s *store) Lookup(name string) (string, error) { + lstore, err := s.LayerStore() + if err != nil { + return "", err + } + lstores, err := s.ROLayerStores() + if err != nil { + return "", err + } + for _, s := range append([]ROLayerStore{lstore}, lstores...) 
{
+		store := s
+		store.RLock()
+		defer store.Unlock()
+		if modified, err := store.Modified(); modified || err != nil {
+			if err = store.Load(); err != nil {
+				return "", err
+			}
+		}
+		if l, err := store.Get(name); l != nil && err == nil {
+			return l.ID, nil
+		}
+	}
+
+	istore, err := s.ImageStore()
+	if err != nil {
+		return "", err
+	}
+	istores, err := s.ROImageStores()
+	if err != nil {
+		return "", err
+	}
+	for _, s := range append([]ROImageStore{istore}, istores...) {
+		store := s
+		store.RLock()
+		defer store.Unlock()
+		if modified, err := store.Modified(); modified || err != nil {
+			if err = store.Load(); err != nil {
+				return "", err
+			}
+		}
+		if i, err := store.Get(name); i != nil && err == nil {
+			return i.ID, nil
+		}
+	}
+
+	cstore, err := s.ContainerStore()
+	if err != nil {
+		return "", err
+	}
+	cstore.RLock()
+	defer cstore.Unlock()
+	if modified, err := cstore.Modified(); modified || err != nil {
+		if err = cstore.Load(); err != nil {
+			return "", err
+		}
+	}
+	if c, err := cstore.Get(name); c != nil && err == nil {
+		return c.ID, nil
+	}
+
+	return "", ErrLayerUnknown
+}
+
+func (s *store) DeleteLayer(id string) error {
+	rlstore, err := s.LayerStore()
+	if err != nil {
+		return err
+	}
+	ristore, err := s.ImageStore()
+	if err != nil {
+		return err
+	}
+	rcstore, err := s.ContainerStore()
+	if err != nil {
+		return err
+	}
+
+	rlstore.Lock()
+	defer rlstore.Unlock()
+	if modified, err := rlstore.Modified(); modified || err != nil {
+		if err = rlstore.Load(); err != nil {
+			return err
+		}
+	}
+	ristore.Lock()
+	defer ristore.Unlock()
+	if modified, err := ristore.Modified(); modified || err != nil {
+		if err = ristore.Load(); err != nil {
+			return err
+		}
+	}
+	rcstore.Lock()
+	defer rcstore.Unlock()
+	if modified, err := rcstore.Modified(); modified || err != nil {
+		if err = rcstore.Load(); err != nil {
+			return err
+		}
+	}
+
+	if rlstore.Exists(id) {
+		// If we were given a name, resolve it to the layer's canonical ID.
+		if l, err := rlstore.Get(id); err == nil {
+			id = l.ID
+		}
+		layers, err := rlstore.Layers()
+		if err != nil {
+			return err
+		}
+		for _, layer := range layers {
+			if layer.Parent == id {
+				return ErrLayerHasChildren
+			}
+		}
+		images, err := ristore.Images()
+		if err != nil {
+			return err
+		}
+		for _, image := range images {
+			if image.TopLayer == id || stringutils.InSlice(image.MappedTopLayers, id) {
+				return errors.Wrapf(ErrLayerUsedByImage, "Layer %v used by image %v", id, image.ID)
+			}
+		}
+		containers, err := rcstore.Containers()
+		if err != nil {
+			return err
+		}
+		for _, container := range containers {
+			if container.LayerID == id {
+				return errors.Wrapf(ErrLayerUsedByContainer, "Layer %v used by container %v", id, container.ID)
+			}
+		}
+		return rlstore.Delete(id)
+	}
+	return ErrNotALayer
+}
+
+func (s *store) DeleteImage(id string, commit bool) (layers []string, err error) {
+	rlstore, err := s.LayerStore()
+	if err != nil {
+		return nil, err
+	}
+	ristore, err := s.ImageStore()
+	if err != nil {
+		return nil, err
+	}
+	rcstore, err := s.ContainerStore()
+	if err != nil {
+		return nil, err
+	}
+
+	rlstore.Lock()
+	defer rlstore.Unlock()
+	if modified, err := rlstore.Modified(); modified || err != nil {
+		if err = rlstore.Load(); err != nil {
+			return nil, err
+		}
+	}
+	ristore.Lock()
+	defer ristore.Unlock()
+	if modified, err := ristore.Modified(); modified || err != nil {
+		if err = ristore.Load(); err != nil {
+			return nil, err
+		}
+	}
+	rcstore.Lock()
+	defer rcstore.Unlock()
+	if modified, err := rcstore.Modified(); modified || err != nil {
+		if err = rcstore.Load(); err != nil {
+			return nil, err
+		}
+	}
+	
layersToRemove := []string{} + if ristore.Exists(id) { + image, err := ristore.Get(id) + if err != nil { + return nil, err + } + id = image.ID + containers, err := rcstore.Containers() + if err != nil { + return nil, err + } + aContainerByImage := make(map[string]string) + for _, container := range containers { + aContainerByImage[container.ImageID] = container.ID + } + if container, ok := aContainerByImage[id]; ok { + return nil, errors.Wrapf(ErrImageUsedByContainer, "Image used by %v", container) + } + images, err := ristore.Images() + if err != nil { + return nil, err + } + layers, err := rlstore.Layers() + if err != nil { + return nil, err + } + childrenByParent := make(map[string]*[]string) + for _, layer := range layers { + parent := layer.Parent + if list, ok := childrenByParent[parent]; ok { + newList := append(*list, layer.ID) + childrenByParent[parent] = &newList + } else { + childrenByParent[parent] = &([]string{layer.ID}) + } + } + otherImagesByTopLayer := make(map[string]string) + for _, img := range images { + if img.ID != id { + otherImagesByTopLayer[img.TopLayer] = img.ID + for _, layerID := range img.MappedTopLayers { + otherImagesByTopLayer[layerID] = img.ID + } + } + } + if commit { + if err = ristore.Delete(id); err != nil { + return nil, err + } + } + layer := image.TopLayer + lastRemoved := "" + for layer != "" { + if rcstore.Exists(layer) { + break + } + if _, ok := otherImagesByTopLayer[layer]; ok { + break + } + parent := "" + if l, err := rlstore.Get(layer); err == nil { + parent = l.Parent + } + hasOtherRefs := func() bool { + layersToCheck := []string{layer} + if layer == image.TopLayer { + layersToCheck = append(layersToCheck, image.MappedTopLayers...) + } + for _, layer := range layersToCheck { + if childList, ok := childrenByParent[layer]; ok && childList != nil { + children := *childList + for _, child := range children { + if child != lastRemoved { + return true + } + } + } + } + return false + } + if hasOtherRefs() { + break + } + lastRemoved = layer + if layer == image.TopLayer { + layersToRemove = append(layersToRemove, image.MappedTopLayers...) 
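+				// The image's ID-remapped top-layer copies are referenced only
+				// by this image, so they go away together with the top layer.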
+ } + layersToRemove = append(layersToRemove, lastRemoved) + layer = parent + } + } else { + return nil, ErrNotAnImage + } + if commit { + for _, layer := range layersToRemove { + if err = rlstore.Delete(layer); err != nil { + return nil, err + } + } + } + return layersToRemove, nil +} + +func (s *store) DeleteContainer(id string) error { + rlstore, err := s.LayerStore() + if err != nil { + return err + } + ristore, err := s.ImageStore() + if err != nil { + return err + } + rcstore, err := s.ContainerStore() + if err != nil { + return err + } + + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + if err = rlstore.Load(); err != nil { + return err + } + } + ristore.Lock() + defer ristore.Unlock() + if modified, err := ristore.Modified(); modified || err != nil { + if err = ristore.Load(); err != nil { + return err + } + } + rcstore.Lock() + defer rcstore.Unlock() + if modified, err := rcstore.Modified(); modified || err != nil { + if err = rcstore.Load(); err != nil { + return err + } + } + + if rcstore.Exists(id) { + if container, err := rcstore.Get(id); err == nil { + if rlstore.Exists(container.LayerID) { + if err = rlstore.Delete(container.LayerID); err != nil { + return err + } + } + if err = rcstore.Delete(id); err != nil { + return err + } + middleDir := s.graphDriverName + "-containers" + gcpath := filepath.Join(s.GraphRoot(), middleDir, container.ID) + if err = os.RemoveAll(gcpath); err != nil { + return err + } + rcpath := filepath.Join(s.RunRoot(), middleDir, container.ID) + if err = os.RemoveAll(rcpath); err != nil { + return err + } + return nil + } + } + return ErrNotAContainer +} + +func (s *store) Delete(id string) error { + rlstore, err := s.LayerStore() + if err != nil { + return err + } + ristore, err := s.ImageStore() + if err != nil { + return err + } + rcstore, err := s.ContainerStore() + if err != nil { + return err + } + + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + if err = rlstore.Load(); err != nil { + return err + } + } + ristore.Lock() + defer ristore.Unlock() + if modified, err := ristore.Modified(); modified || err != nil { + if err := ristore.Load(); err != nil { + return err + } + } + rcstore.Lock() + defer rcstore.Unlock() + if modified, err := rcstore.Modified(); modified || err != nil { + if err = rcstore.Load(); err != nil { + return err + } + } + + if rcstore.Exists(id) { + if container, err := rcstore.Get(id); err == nil { + if rlstore.Exists(container.LayerID) { + if err = rlstore.Delete(container.LayerID); err != nil { + return err + } + if err = rcstore.Delete(id); err != nil { + return err + } + middleDir := s.graphDriverName + "-containers" + gcpath := filepath.Join(s.GraphRoot(), middleDir, container.ID, "userdata") + if err = os.RemoveAll(gcpath); err != nil { + return err + } + rcpath := filepath.Join(s.RunRoot(), middleDir, container.ID, "userdata") + if err = os.RemoveAll(rcpath); err != nil { + return err + } + return nil + } + return ErrNotALayer + } + } + if ristore.Exists(id) { + return ristore.Delete(id) + } + if rlstore.Exists(id) { + return rlstore.Delete(id) + } + return ErrLayerUnknown +} + +func (s *store) Wipe() error { + rcstore, err := s.ContainerStore() + if err != nil { + return err + } + ristore, err := s.ImageStore() + if err != nil { + return err + } + rlstore, err := s.LayerStore() + if err != nil { + return err + } + + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); 
modified || err != nil { + if err = rlstore.Load(); err != nil { + return err + } + } + ristore.Lock() + defer ristore.Unlock() + if modified, err := ristore.Modified(); modified || err != nil { + if err = ristore.Load(); err != nil { + return err + } + } + rcstore.Lock() + defer rcstore.Unlock() + if modified, err := rcstore.Modified(); modified || err != nil { + if err = rcstore.Load(); err != nil { + return err + } + } + + if err = rcstore.Wipe(); err != nil { + return err + } + if err = ristore.Wipe(); err != nil { + return err + } + return rlstore.Wipe() +} + +func (s *store) Status() ([][2]string, error) { + rlstore, err := s.LayerStore() + if err != nil { + return nil, err + } + return rlstore.Status() +} + +func (s *store) Version() ([][2]string, error) { + return [][2]string{}, nil +} + +func (s *store) Mount(id, mountLabel string) (string, error) { + container, err := s.Container(id) + var ( + uidMap, gidMap []idtools.IDMap + mountOpts []string + ) + if err == nil { + uidMap, gidMap = container.UIDMap, container.GIDMap + id = container.LayerID + mountOpts = container.MountOpts() + } + rlstore, err := s.LayerStore() + if err != nil { + return "", err + } + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + if err = rlstore.Load(); err != nil { + return "", err + } + } + if rlstore.Exists(id) { + options := drivers.MountOpts{ + MountLabel: mountLabel, + UidMaps: uidMap, + GidMaps: gidMap, + Options: mountOpts, + } + return rlstore.Mount(id, options) + } + return "", ErrLayerUnknown +} + +func (s *store) Mounted(id string) (int, error) { + if layerID, err := s.ContainerLayerID(id); err == nil { + id = layerID + } + rlstore, err := s.LayerStore() + if err != nil { + return 0, err + } + rlstore.RLock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + if err = rlstore.Load(); err != nil { + return 0, err + } + } + + return rlstore.Mounted(id) +} + +func (s *store) Unmount(id string, force bool) (bool, error) { + if layerID, err := s.ContainerLayerID(id); err == nil { + id = layerID + } + rlstore, err := s.LayerStore() + if err != nil { + return false, err + } + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + if err = rlstore.Load(); err != nil { + return false, err + } + } + if rlstore.Exists(id) { + return rlstore.Unmount(id, force) + } + return false, ErrLayerUnknown +} + +func (s *store) Changes(from, to string) ([]archive.Change, error) { + lstore, err := s.LayerStore() + if err != nil { + return nil, err + } + lstores, err := s.ROLayerStores() + if err != nil { + return nil, err + } + for _, s := range append([]ROLayerStore{lstore}, lstores...) { + store := s + store.RLock() + defer store.Unlock() + if modified, err := store.Modified(); modified || err != nil { + if err = store.Load(); err != nil { + return nil, err + } + } + if store.Exists(to) { + return store.Changes(from, to) + } + } + return nil, ErrLayerUnknown +} + +func (s *store) DiffSize(from, to string) (int64, error) { + lstore, err := s.LayerStore() + if err != nil { + return -1, err + } + lstores, err := s.ROLayerStores() + if err != nil { + return -1, err + } + for _, s := range append([]ROLayerStore{lstore}, lstores...) 
{ + store := s + store.RLock() + defer store.Unlock() + if modified, err := store.Modified(); modified || err != nil { + if err = store.Load(); err != nil { + return -1, err + } + } + if store.Exists(to) { + return store.DiffSize(from, to) + } + } + return -1, ErrLayerUnknown +} + +func (s *store) Diff(from, to string, options *DiffOptions) (io.ReadCloser, error) { + lstore, err := s.LayerStore() + if err != nil { + return nil, err + } + lstores, err := s.ROLayerStores() + if err != nil { + return nil, err + } + for _, s := range append([]ROLayerStore{lstore}, lstores...) { + store := s + store.RLock() + if modified, err := store.Modified(); modified || err != nil { + if err = store.Load(); err != nil { + return nil, err + } + } + if store.Exists(to) { + rc, err := store.Diff(from, to, options) + if rc != nil && err == nil { + wrapped := ioutils.NewReadCloserWrapper(rc, func() error { + err := rc.Close() + store.Unlock() + return err + }) + return wrapped, nil + } + store.Unlock() + return rc, err + } + store.Unlock() + } + return nil, ErrLayerUnknown +} + +func (s *store) ApplyDiff(to string, diff io.Reader) (int64, error) { + rlstore, err := s.LayerStore() + if err != nil { + return -1, err + } + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + if err = rlstore.Load(); err != nil { + return -1, err + } + } + if rlstore.Exists(to) { + return rlstore.ApplyDiff(to, diff) + } + return -1, ErrLayerUnknown +} + +func (s *store) layersByMappedDigest(m func(ROLayerStore, digest.Digest) ([]Layer, error), d digest.Digest) ([]Layer, error) { + var layers []Layer + lstore, err := s.LayerStore() + if err != nil { + return nil, err + } + + lstores, err := s.ROLayerStores() + if err != nil { + return nil, err + } + for _, s := range append([]ROLayerStore{lstore}, lstores...) { + store := s + store.RLock() + defer store.Unlock() + if modified, err := store.Modified(); modified || err != nil { + if err = store.Load(); err != nil { + return nil, err + } + } + storeLayers, err := m(store, d) + if err != nil { + if errors.Cause(err) != ErrLayerUnknown { + return nil, err + } + continue + } + layers = append(layers, storeLayers...) + } + if len(layers) == 0 { + return nil, ErrLayerUnknown + } + return layers, nil +} + +func (s *store) LayersByCompressedDigest(d digest.Digest) ([]Layer, error) { + if err := d.Validate(); err != nil { + return nil, errors.Wrapf(err, "error looking for compressed layers matching digest %q", d) + } + return s.layersByMappedDigest(func(r ROLayerStore, d digest.Digest) ([]Layer, error) { return r.LayersByCompressedDigest(d) }, d) +} + +func (s *store) LayersByUncompressedDigest(d digest.Digest) ([]Layer, error) { + if err := d.Validate(); err != nil { + return nil, errors.Wrapf(err, "error looking for layers matching digest %q", d) + } + return s.layersByMappedDigest(func(r ROLayerStore, d digest.Digest) ([]Layer, error) { return r.LayersByUncompressedDigest(d) }, d) +} + +func (s *store) LayerSize(id string) (int64, error) { + lstore, err := s.LayerStore() + if err != nil { + return -1, err + } + lstores, err := s.ROLayerStores() + if err != nil { + return -1, err + } + for _, s := range append([]ROLayerStore{lstore}, lstores...) 
{ + store := s + store.RLock() + defer store.Unlock() + if modified, err := store.Modified(); modified || err != nil { + if err = store.Load(); err != nil { + return -1, err + } + } + if store.Exists(id) { + return store.Size(id) + } + } + return -1, ErrLayerUnknown +} + +func (s *store) LayerParentOwners(id string) ([]int, []int, error) { + rlstore, err := s.LayerStore() + if err != nil { + return nil, nil, err + } + rlstore.RLock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + if err = rlstore.Load(); err != nil { + return nil, nil, err + } + } + if rlstore.Exists(id) { + return rlstore.ParentOwners(id) + } + return nil, nil, ErrLayerUnknown +} + +func (s *store) ContainerParentOwners(id string) ([]int, []int, error) { + rlstore, err := s.LayerStore() + if err != nil { + return nil, nil, err + } + rcstore, err := s.ContainerStore() + if err != nil { + return nil, nil, err + } + rlstore.RLock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + if err = rlstore.Load(); err != nil { + return nil, nil, err + } + } + rcstore.RLock() + defer rcstore.Unlock() + if modified, err := rcstore.Modified(); modified || err != nil { + if err = rcstore.Load(); err != nil { + return nil, nil, err + } + } + container, err := rcstore.Get(id) + if err != nil { + return nil, nil, err + } + if rlstore.Exists(container.LayerID) { + return rlstore.ParentOwners(container.LayerID) + } + return nil, nil, ErrLayerUnknown +} + +func (s *store) Layers() ([]Layer, error) { + var layers []Layer + lstore, err := s.LayerStore() + if err != nil { + return nil, err + } + + lstores, err := s.ROLayerStores() + if err != nil { + return nil, err + } + + for _, s := range append([]ROLayerStore{lstore}, lstores...) { + store := s + store.RLock() + defer store.Unlock() + if modified, err := store.Modified(); modified || err != nil { + if err = store.Load(); err != nil { + return nil, err + } + } + storeLayers, err := store.Layers() + if err != nil { + return nil, err + } + layers = append(layers, storeLayers...) + } + return layers, nil +} + +func (s *store) Images() ([]Image, error) { + var images []Image + istore, err := s.ImageStore() + if err != nil { + return nil, err + } + + istores, err := s.ROImageStores() + if err != nil { + return nil, err + } + for _, s := range append([]ROImageStore{istore}, istores...) { + store := s + store.RLock() + defer store.Unlock() + if modified, err := store.Modified(); modified || err != nil { + if err = store.Load(); err != nil { + return nil, err + } + } + storeImages, err := store.Images() + if err != nil { + return nil, err + } + images = append(images, storeImages...) + } + return images, nil +} + +func (s *store) Containers() ([]Container, error) { + rcstore, err := s.ContainerStore() + if err != nil { + return nil, err + } + + rcstore.RLock() + defer rcstore.Unlock() + if modified, err := rcstore.Modified(); modified || err != nil { + if err = rcstore.Load(); err != nil { + return nil, err + } + } + + return rcstore.Containers() +} + +func (s *store) Layer(id string) (*Layer, error) { + lstore, err := s.LayerStore() + if err != nil { + return nil, err + } + lstores, err := s.ROLayerStores() + if err != nil { + return nil, err + } + for _, s := range append([]ROLayerStore{lstore}, lstores...) 
{ + store := s + store.RLock() + defer store.Unlock() + if modified, err := store.Modified(); modified || err != nil { + if err = store.Load(); err != nil { + return nil, err + } + } + layer, err := store.Get(id) + if err == nil { + return layer, nil + } + } + return nil, ErrLayerUnknown +} + +func (s *store) Image(id string) (*Image, error) { + istore, err := s.ImageStore() + if err != nil { + return nil, err + } + istores, err := s.ROImageStores() + if err != nil { + return nil, err + } + for _, s := range append([]ROImageStore{istore}, istores...) { + store := s + store.RLock() + defer store.Unlock() + if modified, err := store.Modified(); modified || err != nil { + if err = store.Load(); err != nil { + return nil, err + } + } + image, err := store.Get(id) + if err == nil { + return image, nil + } + } + return nil, ErrImageUnknown +} + +func (s *store) ImagesByTopLayer(id string) ([]*Image, error) { + images := []*Image{} + layer, err := s.Layer(id) + if err != nil { + return nil, err + } + + istore, err := s.ImageStore() + if err != nil { + return nil, err + } + + istores, err := s.ROImageStores() + if err != nil { + return nil, err + } + for _, s := range append([]ROImageStore{istore}, istores...) { + store := s + store.RLock() + defer store.Unlock() + if modified, err := store.Modified(); modified || err != nil { + if err = store.Load(); err != nil { + return nil, err + } + } + imageList, err := store.Images() + if err != nil { + return nil, err + } + for _, image := range imageList { + if image.TopLayer == layer.ID || stringutils.InSlice(image.MappedTopLayers, layer.ID) { + images = append(images, &image) + } + } + } + return images, nil +} + +func (s *store) ImagesByDigest(d digest.Digest) ([]*Image, error) { + images := []*Image{} + + istore, err := s.ImageStore() + if err != nil { + return nil, err + } + + istores, err := s.ROImageStores() + if err != nil { + return nil, err + } + for _, store := range append([]ROImageStore{istore}, istores...) { + store.RLock() + defer store.Unlock() + if modified, err := store.Modified(); modified || err != nil { + if err = store.Load(); err != nil { + return nil, err + } + } + imageList, err := store.ByDigest(d) + if err != nil && err != ErrImageUnknown { + return nil, err + } + images = append(images, imageList...) 
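+		// Keep going: an image with this digest may be present in more than
+		// one store.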
+ } + return images, nil +} + +func (s *store) Container(id string) (*Container, error) { + rcstore, err := s.ContainerStore() + if err != nil { + return nil, err + } + rcstore.RLock() + defer rcstore.Unlock() + if modified, err := rcstore.Modified(); modified || err != nil { + if err = rcstore.Load(); err != nil { + return nil, err + } + } + + return rcstore.Get(id) +} + +func (s *store) ContainerLayerID(id string) (string, error) { + rcstore, err := s.ContainerStore() + if err != nil { + return "", err + } + rcstore.RLock() + defer rcstore.Unlock() + if modified, err := rcstore.Modified(); modified || err != nil { + if err = rcstore.Load(); err != nil { + return "", err + } + } + container, err := rcstore.Get(id) + if err != nil { + return "", err + } + return container.LayerID, nil +} + +func (s *store) ContainerByLayer(id string) (*Container, error) { + layer, err := s.Layer(id) + if err != nil { + return nil, err + } + rcstore, err := s.ContainerStore() + if err != nil { + return nil, err + } + rcstore.RLock() + defer rcstore.Unlock() + if modified, err := rcstore.Modified(); modified || err != nil { + if err = rcstore.Load(); err != nil { + return nil, err + } + } + containerList, err := rcstore.Containers() + if err != nil { + return nil, err + } + for _, container := range containerList { + if container.LayerID == layer.ID { + return &container, nil + } + } + + return nil, ErrContainerUnknown +} + +func (s *store) ContainerDirectory(id string) (string, error) { + rcstore, err := s.ContainerStore() + if err != nil { + return "", err + } + rcstore.RLock() + defer rcstore.Unlock() + if modified, err := rcstore.Modified(); modified || err != nil { + if err = rcstore.Load(); err != nil { + return "", err + } + } + + id, err = rcstore.Lookup(id) + if err != nil { + return "", err + } + + middleDir := s.graphDriverName + "-containers" + gcpath := filepath.Join(s.GraphRoot(), middleDir, id, "userdata") + if err := os.MkdirAll(gcpath, 0700); err != nil { + return "", err + } + return gcpath, nil +} + +func (s *store) ContainerRunDirectory(id string) (string, error) { + rcstore, err := s.ContainerStore() + if err != nil { + return "", err + } + + rcstore.RLock() + defer rcstore.Unlock() + if modified, err := rcstore.Modified(); modified || err != nil { + if err = rcstore.Load(); err != nil { + return "", err + } + } + + id, err = rcstore.Lookup(id) + if err != nil { + return "", err + } + + middleDir := s.graphDriverName + "-containers" + rcpath := filepath.Join(s.RunRoot(), middleDir, id, "userdata") + if err := os.MkdirAll(rcpath, 0700); err != nil { + return "", err + } + return rcpath, nil +} + +func (s *store) SetContainerDirectoryFile(id, file string, data []byte) error { + dir, err := s.ContainerDirectory(id) + if err != nil { + return err + } + err = os.MkdirAll(filepath.Dir(filepath.Join(dir, file)), 0700) + if err != nil { + return err + } + return ioutils.AtomicWriteFile(filepath.Join(dir, file), data, 0600) +} + +func (s *store) FromContainerDirectory(id, file string) ([]byte, error) { + dir, err := s.ContainerDirectory(id) + if err != nil { + return nil, err + } + return ioutil.ReadFile(filepath.Join(dir, file)) +} + +func (s *store) SetContainerRunDirectoryFile(id, file string, data []byte) error { + dir, err := s.ContainerRunDirectory(id) + if err != nil { + return err + } + err = os.MkdirAll(filepath.Dir(filepath.Join(dir, file)), 0700) + if err != nil { + return err + } + return ioutils.AtomicWriteFile(filepath.Join(dir, file), data, 0600) +} + +func (s *store) 
FromContainerRunDirectory(id, file string) ([]byte, error) { + dir, err := s.ContainerRunDirectory(id) + if err != nil { + return nil, err + } + return ioutil.ReadFile(filepath.Join(dir, file)) +} + +func (s *store) Shutdown(force bool) ([]string, error) { + mounted := []string{} + modified := false + + rlstore, err := s.LayerStore() + if err != nil { + return mounted, err + } + + s.graphLock.Lock() + defer s.graphLock.Unlock() + + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + if err = rlstore.Load(); err != nil { + return nil, err + } + } + + layers, err := rlstore.Layers() + if err != nil { + return mounted, err + } + for _, layer := range layers { + if layer.MountCount == 0 { + continue + } + mounted = append(mounted, layer.ID) + if force { + for layer.MountCount > 0 { + _, err2 := rlstore.Unmount(layer.ID, force) + if err2 != nil { + if err == nil { + err = err2 + } + break + } + modified = true + } + } + } + if len(mounted) > 0 && err == nil { + err = errors.Wrap(ErrLayerUsedByContainer, "A layer is mounted") + } + if err == nil { + err = s.graphDriver.Cleanup() + s.graphLock.Touch() + modified = true + } + if modified { + rlstore.Touch() + } + return mounted, err +} + +// Convert a BigData key name into an acceptable file name. +func makeBigDataBaseName(key string) string { + reader := strings.NewReader(key) + for reader.Len() > 0 { + ch, size, err := reader.ReadRune() + if err != nil || size != 1 { + break + } + if ch != '.' && !(ch >= '0' && ch <= '9') && !(ch >= 'a' && ch <= 'z') { + break + } + } + if reader.Len() > 0 { + return "=" + base64.StdEncoding.EncodeToString([]byte(key)) + } + return key +} + +func stringSliceWithoutValue(slice []string, value string) []string { + modified := make([]string, 0, len(slice)) + for _, v := range slice { + if v == value { + continue + } + modified = append(modified, v) + } + return modified +} + +func copyStringSlice(slice []string) []string { + if len(slice) == 0 { + return nil + } + ret := make([]string, len(slice)) + copy(ret, slice) + return ret +} + +func copyStringInt64Map(m map[string]int64) map[string]int64 { + ret := make(map[string]int64, len(m)) + for k, v := range m { + ret[k] = v + } + return ret +} + +func copyStringDigestMap(m map[string]digest.Digest) map[string]digest.Digest { + ret := make(map[string]digest.Digest, len(m)) + for k, v := range m { + ret[k] = v + } + return ret +} + +func copyDigestSlice(slice []digest.Digest) []digest.Digest { + if len(slice) == 0 { + return nil + } + ret := make([]digest.Digest, len(slice)) + copy(ret, slice) + return ret +} + +// copyStringInterfaceMap still forces us to assume that the interface{} is +// a non-pointer scalar value +func copyStringInterfaceMap(m map[string]interface{}) map[string]interface{} { + ret := make(map[string]interface{}, len(m)) + for k, v := range m { + ret[k] = v + } + return ret +} + +// defaultConfigFile path to the system wide storage.conf file +const defaultConfigFile = "/etc/containers/storage.conf" + +// DefaultConfigFile returns the path to the storage config file used +func DefaultConfigFile(rootless bool) (string, error) { + if rootless { + home, err := homeDir() + if err != nil { + return "", errors.Wrapf(err, "cannot determine users homedir") + } + return filepath.Join(home, ".config/containers/storage.conf"), nil + } + return defaultConfigFile, nil +} + +// TOML-friendly explicit tables used for conversions. 
+type tomlConfig struct { + Storage struct { + Driver string `toml:"driver"` + RunRoot string `toml:"runroot"` + GraphRoot string `toml:"graphroot"` + Options struct{ config.OptionsConfig } `toml:"options"` + } `toml:"storage"` +} + +// ReloadConfigurationFile parses the specified configuration file and overrides +// the configuration in storeOptions. +func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) { + data, err := ioutil.ReadFile(configFile) + if err != nil { + if !os.IsNotExist(err) { + fmt.Printf("Failed to read %s %v\n", configFile, err.Error()) + return + } + } + + config := new(tomlConfig) + + if _, err := toml.Decode(string(data), config); err != nil { + fmt.Printf("Failed to parse %s %v\n", configFile, err.Error()) + return + } + if config.Storage.Driver != "" { + storeOptions.GraphDriverName = config.Storage.Driver + } + if config.Storage.RunRoot != "" { + storeOptions.RunRoot = config.Storage.RunRoot + } + if config.Storage.GraphRoot != "" { + storeOptions.GraphRoot = config.Storage.GraphRoot + } + if config.Storage.Options.Thinpool.AutoExtendPercent != "" { + storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.thinp_autoextend_percent=%s", config.Storage.Options.Thinpool.AutoExtendPercent)) + } + + if config.Storage.Options.Thinpool.AutoExtendThreshold != "" { + storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.thinp_autoextend_threshold=%s", config.Storage.Options.Thinpool.AutoExtendThreshold)) + } + + if config.Storage.Options.Thinpool.BaseSize != "" { + storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.basesize=%s", config.Storage.Options.Thinpool.BaseSize)) + } + if config.Storage.Options.Thinpool.BlockSize != "" { + storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.blocksize=%s", config.Storage.Options.Thinpool.BlockSize)) + } + if config.Storage.Options.Thinpool.DirectLvmDevice != "" { + storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.directlvm_device=%s", config.Storage.Options.Thinpool.DirectLvmDevice)) + } + if config.Storage.Options.Thinpool.DirectLvmDeviceForce != "" { + storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.directlvm_device_force=%s", config.Storage.Options.Thinpool.DirectLvmDeviceForce)) + } + if config.Storage.Options.Thinpool.Fs != "" { + storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.fs=%s", config.Storage.Options.Thinpool.Fs)) + } + if config.Storage.Options.Thinpool.LogLevel != "" { + storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.libdm_log_level=%s", config.Storage.Options.Thinpool.LogLevel)) + } + if config.Storage.Options.Thinpool.MinFreeSpace != "" { + storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.min_free_space=%s", config.Storage.Options.Thinpool.MinFreeSpace)) + } + if config.Storage.Options.Thinpool.MkfsArg != "" { + storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.mkfsarg=%s", config.Storage.Options.Thinpool.MkfsArg)) + } + if config.Storage.Options.Thinpool.MountOpt != "" { + storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.mountopt=%s", config.Storage.Driver, config.Storage.Options.Thinpool.MountOpt)) + } + if config.Storage.Options.Thinpool.UseDeferredDeletion != "" 
{ + storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.use_deferred_deletion=%s", config.Storage.Options.Thinpool.UseDeferredDeletion)) + } + if config.Storage.Options.Thinpool.UseDeferredRemoval != "" { + storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.use_deferred_removal=%s", config.Storage.Options.Thinpool.UseDeferredRemoval)) + } + if config.Storage.Options.Thinpool.XfsNoSpaceMaxRetries != "" { + storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.xfs_nospace_max_retries=%s", config.Storage.Options.Thinpool.XfsNoSpaceMaxRetries)) + } + for _, s := range config.Storage.Options.AdditionalImageStores { + storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.imagestore=%s", config.Storage.Driver, s)) + } + if config.Storage.Options.Size != "" { + storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.size=%s", config.Storage.Driver, config.Storage.Options.Size)) + } + if config.Storage.Options.OstreeRepo != "" { + storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.ostree_repo=%s", config.Storage.Driver, config.Storage.Options.OstreeRepo)) + } + if config.Storage.Options.SkipMountHome != "" { + storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.skip_mount_home=%s", config.Storage.Driver, config.Storage.Options.SkipMountHome)) + } + if config.Storage.Options.MountProgram != "" { + storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.mount_program=%s", config.Storage.Driver, config.Storage.Options.MountProgram)) + } + if config.Storage.Options.MountOpt != "" { + storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.mountopt=%s", config.Storage.Driver, config.Storage.Options.MountOpt)) + } + if config.Storage.Options.RemapUser != "" && config.Storage.Options.RemapGroup == "" { + config.Storage.Options.RemapGroup = config.Storage.Options.RemapUser + } + if config.Storage.Options.RemapGroup != "" && config.Storage.Options.RemapUser == "" { + config.Storage.Options.RemapUser = config.Storage.Options.RemapGroup + } + if config.Storage.Options.RemapUser != "" && config.Storage.Options.RemapGroup != "" { + mappings, err := idtools.NewIDMappings(config.Storage.Options.RemapUser, config.Storage.Options.RemapGroup) + if err != nil { + fmt.Printf("Error initializing ID mappings for %s:%s %v\n", config.Storage.Options.RemapUser, config.Storage.Options.RemapGroup, err) + return + } + storeOptions.UIDMap = mappings.UIDs() + storeOptions.GIDMap = mappings.GIDs() + } + + uidmap, err := idtools.ParseIDMap([]string{config.Storage.Options.RemapUIDs}, "remap-uids") + if err != nil { + fmt.Print(err) + } else { + storeOptions.UIDMap = append(storeOptions.UIDMap, uidmap...) + } + gidmap, err := idtools.ParseIDMap([]string{config.Storage.Options.RemapGIDs}, "remap-gids") + if err != nil { + fmt.Print(err) + } else { + storeOptions.GIDMap = append(storeOptions.GIDMap, gidmap...) + } + if os.Getenv("STORAGE_DRIVER") != "" { + storeOptions.GraphDriverName = os.Getenv("STORAGE_DRIVER") + } + if os.Getenv("STORAGE_OPTS") != "" { + storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, strings.Split(os.Getenv("STORAGE_OPTS"), ",")...) 
+	}
+	if len(storeOptions.GraphDriverOptions) == 1 && storeOptions.GraphDriverOptions[0] == "" {
+		storeOptions.GraphDriverOptions = nil
+	}
+}
+
+func init() {
+	defaultStoreOptions.RunRoot = "/var/run/containers/storage"
+	defaultStoreOptions.GraphRoot = "/var/lib/containers/storage"
+	defaultStoreOptions.GraphDriverName = ""
+
+	ReloadConfigurationFile(defaultConfigFile, &defaultStoreOptions)
+}
+
+// GetDefaultMountOptions returns the default mount options defined in containers/storage
+func GetDefaultMountOptions() ([]string, error) {
+	return GetMountOptions(defaultStoreOptions.GraphDriverName, defaultStoreOptions.GraphDriverOptions)
+}
+
+// GetMountOptions returns the mount options for the specified driver and graphDriverOptions
+func GetMountOptions(driver string, graphDriverOptions []string) ([]string, error) {
+	mountOpts := []string{
+		".mountopt",
+		fmt.Sprintf("%s.mountopt", driver),
+	}
+	for _, option := range graphDriverOptions {
+		key, val, err := parsers.ParseKeyValueOpt(option)
+		if err != nil {
+			return nil, err
+		}
+		key = strings.ToLower(key)
+		for _, m := range mountOpts {
+			if m == key {
+				return strings.Split(val, ","), nil
+			}
+		}
+	}
+	return nil, nil
+}
diff --git a/vendor/github.com/containers/storage/utils.go b/vendor/github.com/containers/storage/utils.go
new file mode 100644
index 0000000..fafaaab
--- /dev/null
+++ b/vendor/github.com/containers/storage/utils.go
@@ -0,0 +1,252 @@
+package storage
+
+import (
+	"fmt"
+	"os"
+	"os/exec"
+	"os/user"
+	"path/filepath"
+	"strconv"
+	"strings"
+
+	"github.com/BurntSushi/toml"
+	"github.com/containers/storage/pkg/idtools"
+	"github.com/containers/storage/pkg/system"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+// ParseIDMapping takes ID mappings and subuid and subgid maps and returns a storage mapping
+func ParseIDMapping(UIDMapSlice, GIDMapSlice []string, subUIDMap, subGIDMap string) (*IDMappingOptions, error) {
+	options := IDMappingOptions{
+		HostUIDMapping: true,
+		HostGIDMapping: true,
+	}
+	if subGIDMap == "" && subUIDMap != "" {
+		subGIDMap = subUIDMap
+	}
+	if subUIDMap == "" && subGIDMap != "" {
+		subUIDMap = subGIDMap
+	}
+	if len(GIDMapSlice) == 0 && len(UIDMapSlice) != 0 {
+		GIDMapSlice = UIDMapSlice
+	}
+	if len(UIDMapSlice) == 0 && len(GIDMapSlice) != 0 {
+		UIDMapSlice = GIDMapSlice
+	}
+	if len(UIDMapSlice) == 0 && subUIDMap == "" && os.Getuid() != 0 {
+		UIDMapSlice = []string{fmt.Sprintf("0:%d:1", os.Getuid())}
+	}
+	if len(GIDMapSlice) == 0 && subGIDMap == "" && os.Getuid() != 0 {
+		GIDMapSlice = []string{fmt.Sprintf("0:%d:1", os.Getgid())}
+	}
+
+	if subUIDMap != "" && subGIDMap != "" {
+		mappings, err := idtools.NewIDMappings(subUIDMap, subGIDMap)
+		if err != nil {
+			return nil, errors.Wrapf(err, "failed to create NewIDMappings for uidmap=%s gidmap=%s", subUIDMap, subGIDMap)
+		}
+		options.UIDMap = mappings.UIDs()
+		options.GIDMap = mappings.GIDs()
+	}
+	parsedUIDMap, err := idtools.ParseIDMap(UIDMapSlice, "UID")
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed to create ParseUIDMap UID=%s", UIDMapSlice)
+	}
+	parsedGIDMap, err := idtools.ParseIDMap(GIDMapSlice, "GID")
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed to create ParseGIDMap GID=%s", GIDMapSlice)
+	}
+	options.UIDMap = append(options.UIDMap, parsedUIDMap...)
+	options.GIDMap = append(options.GIDMap, parsedGIDMap...)
+ if len(options.UIDMap) > 0 { + options.HostUIDMapping = false + } + if len(options.GIDMap) > 0 { + options.HostGIDMapping = false + } + return &options, nil +} + +// GetRootlessRuntimeDir returns the runtime directory when running as non root +func GetRootlessRuntimeDir(rootlessUid int) (string, error) { + runtimeDir := os.Getenv("XDG_RUNTIME_DIR") + + if runtimeDir != "" { + return runtimeDir, nil + } + tmpDir := fmt.Sprintf("/run/user/%d", rootlessUid) + st, err := system.Stat(tmpDir) + if err == nil && int(st.UID()) == os.Getuid() && st.Mode()&0700 == 0700 && st.Mode()&0066 == 0000 { + return tmpDir, nil + } + tmpDir = fmt.Sprintf("%s/%d", os.TempDir(), rootlessUid) + if err := os.MkdirAll(tmpDir, 0700); err != nil { + logrus.Errorf("failed to create %s: %v", tmpDir, err) + } else { + return tmpDir, nil + } + home, err := homeDir() + if err != nil { + return "", errors.Wrapf(err, "neither XDG_RUNTIME_DIR nor HOME was set non-empty") + } + resolvedHome, err := filepath.EvalSymlinks(home) + if err != nil { + return "", errors.Wrapf(err, "cannot resolve %s", home) + } + return filepath.Join(resolvedHome, "rundir"), nil +} + +// getRootlessDirInfo returns the parent path of where the storage for containers and +// volumes will be in rootless mode +func getRootlessDirInfo(rootlessUid int) (string, string, error) { + rootlessRuntime, err := GetRootlessRuntimeDir(rootlessUid) + if err != nil { + return "", "", err + } + + dataDir := os.Getenv("XDG_DATA_HOME") + if dataDir == "" { + home, err := homeDir() + if err != nil { + return "", "", errors.Wrapf(err, "neither XDG_DATA_HOME nor HOME was set non-empty") + } + // runc doesn't like symlinks in the rootfs path, and at least + // on CoreOS /home is a symlink to /var/home, so resolve any symlink. + resolvedHome, err := filepath.EvalSymlinks(home) + if err != nil { + return "", "", errors.Wrapf(err, "cannot resolve %s", home) + } + dataDir = filepath.Join(resolvedHome, ".local", "share") + } + return dataDir, rootlessRuntime, nil +} + +// getRootlessStorageOpts returns the storage opts for containers running as non root +func getRootlessStorageOpts(rootlessUid int) (StoreOptions, error) { + var opts StoreOptions + + dataDir, rootlessRuntime, err := getRootlessDirInfo(rootlessUid) + if err != nil { + return opts, err + } + opts.RunRoot = rootlessRuntime + opts.GraphRoot = filepath.Join(dataDir, "containers", "storage") + if path, err := exec.LookPath("fuse-overlayfs"); err == nil { + opts.GraphDriverName = "overlay" + opts.GraphDriverOptions = []string{fmt.Sprintf("overlay.mount_program=%s", path)} + } else { + opts.GraphDriverName = "vfs" + } + return opts, nil +} + +type tomlOptionsConfig struct { + MountProgram string `toml:"mount_program"` +} + +func getTomlStorage(storeOptions *StoreOptions) *tomlConfig { + config := new(tomlConfig) + + config.Storage.Driver = storeOptions.GraphDriverName + config.Storage.RunRoot = storeOptions.RunRoot + config.Storage.GraphRoot = storeOptions.GraphRoot + for _, i := range storeOptions.GraphDriverOptions { + s := strings.Split(i, "=") + if s[0] == "overlay.mount_program" { + config.Storage.Options.MountProgram = s[1] + } + } + + return config +} + +func getRootlessUID() int { + uidEnv := os.Getenv("_CONTAINERS_ROOTLESS_UID") + if uidEnv != "" { + u, _ := strconv.Atoi(uidEnv) + return u + } + return os.Geteuid() +} + +// DefaultStoreOptionsAutoDetectUID returns the default storage ops for containers +func DefaultStoreOptionsAutoDetectUID() (StoreOptions, error) { + uid := getRootlessUID() + return 
DefaultStoreOptions(uid != 0, uid) +} + +// DefaultStoreOptions returns the default storage ops for containers +func DefaultStoreOptions(rootless bool, rootlessUid int) (StoreOptions, error) { + var ( + defaultRootlessRunRoot string + defaultRootlessGraphRoot string + err error + ) + storageOpts := defaultStoreOptions + if rootless && rootlessUid != 0 { + storageOpts, err = getRootlessStorageOpts(rootlessUid) + if err != nil { + return storageOpts, err + } + } + + storageConf, err := DefaultConfigFile(rootless && rootlessUid != 0) + if err != nil { + return storageOpts, err + } + if _, err = os.Stat(storageConf); err == nil { + defaultRootlessRunRoot = storageOpts.RunRoot + defaultRootlessGraphRoot = storageOpts.GraphRoot + storageOpts = StoreOptions{} + ReloadConfigurationFile(storageConf, &storageOpts) + } + + if !os.IsNotExist(err) { + return storageOpts, errors.Wrapf(err, "cannot stat %s", storageConf) + } + + if rootless && rootlessUid != 0 { + if err == nil { + // If the file did not specify a graphroot or runroot, + // set sane defaults so we don't try and use root-owned + // directories + if storageOpts.RunRoot == "" { + storageOpts.RunRoot = defaultRootlessRunRoot + } + if storageOpts.GraphRoot == "" { + storageOpts.GraphRoot = defaultRootlessGraphRoot + } + } else { + if err := os.MkdirAll(filepath.Dir(storageConf), 0755); err != nil { + return storageOpts, errors.Wrapf(err, "cannot make directory %s", filepath.Dir(storageConf)) + } + file, err := os.OpenFile(storageConf, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666) + if err != nil { + return storageOpts, errors.Wrapf(err, "cannot open %s", storageConf) + } + + tomlConfiguration := getTomlStorage(&storageOpts) + defer file.Close() + enc := toml.NewEncoder(file) + if err := enc.Encode(tomlConfiguration); err != nil { + os.Remove(storageConf) + + return storageOpts, errors.Wrapf(err, "failed to encode %s", storageConf) + } + } + } + return storageOpts, nil +} + +func homeDir() (string, error) { + home := os.Getenv("HOME") + if home == "" { + usr, err := user.Current() + if err != nil { + return "", errors.Wrapf(err, "neither XDG_RUNTIME_DIR nor HOME was set non-empty") + } + home = usr.HomeDir + } + return home, nil +} diff --git a/vendor/github.com/containers/storage/vendor.conf b/vendor/github.com/containers/storage/vendor.conf new file mode 100644 index 0000000..62a3f98 --- /dev/null +++ b/vendor/github.com/containers/storage/vendor.conf @@ -0,0 +1,28 @@ +github.com/BurntSushi/toml master +github.com/Microsoft/go-winio 307e919c663683a9000576fdc855acaf9534c165 +github.com/Microsoft/hcsshim a8d9cc56cbce765a7eebdf4792e6ceceeff3edb8 +github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76 +github.com/docker/docker 86f080cff0914e9694068ed78d503701667c4c00 +github.com/docker/go-units 0dadbb0345b35ec7ef35e228dabb8de89a65bf52 +github.com/klauspost/compress v1.4.1 +github.com/klauspost/cpuid v1.2.0 +github.com/klauspost/pgzip v1.2.1 +github.com/mattn/go-shellwords 753a2322a99f87c0eff284980e77f53041555bc6 +github.com/mistifyio/go-zfs c0224de804d438efd11ea6e52ada8014537d6062 +github.com/opencontainers/go-digest master +github.com/opencontainers/runc 6c22e77604689db8725fa866f0f2ec0b3e8c3a07 +github.com/opencontainers/selinux v1.1 +github.com/ostreedev/ostree-go master +github.com/pborman/uuid 1b00554d822231195d1babd97ff4a781231955c9 +github.com/pkg/errors master +github.com/pmezard/go-difflib v1.0.0 +github.com/pquerna/ffjson d49c2bc1aa135aad0c6f4fc2056623ec78f5d5ac +github.com/sirupsen/logrus v1.0.0 +github.com/stretchr/testify 
4d4bfba8f1d1027c4fdbe371823030df51419987 +github.com/syndtr/gocapability master +github.com/tchap/go-patricia v2.2.6 +github.com/vbatts/tar-split v0.10.2 +golang.org/x/net 7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6 +golang.org/x/sys 07c182904dbd53199946ba614a412c61d3c548f5 +gotest.tools master +github.com/google/go-cmp master diff --git a/vendor/github.com/docker/distribution/AUTHORS b/vendor/github.com/docker/distribution/AUTHORS deleted file mode 100644 index aaf0298..0000000 --- a/vendor/github.com/docker/distribution/AUTHORS +++ /dev/null @@ -1,182 +0,0 @@ -Aaron Lehmann -Aaron Schlesinger -Aaron Vinson -Adam Duke -Adam Enger -Adrian Mouat -Ahmet Alp Balkan -Alex Chan -Alex Elman -Alexey Gladkov -allencloud -amitshukla -Amy Lindburg -Andrew Hsu -Andrew Meredith -Andrew T Nguyen -Andrey Kostov -Andy Goldstein -Anis Elleuch -Antonio Mercado -Antonio Murdaca -Anton Tiurin -Anusha Ragunathan -a-palchikov -Arien Holthuizen -Arnaud Porterie -Arthur Baars -Asuka Suzuki -Avi Miller -Ayose Cazorla -BadZen -Ben Bodenmiller -Ben Firshman -bin liu -Brian Bland -burnettk -Carson A -Cezar Sa Espinola -Charles Smith -Chris Dillon -cuiwei13 -cyli -Daisuke Fujita -Daniel Huhn -Darren Shepherd -Dave Trombley -Dave Tucker -David Lawrence -davidli -David Verhasselt -David Xia -Dejan Golja -Derek McGowan -Diogo Mónica -DJ Enriquez -Donald Huang -Doug Davis -Edgar Lee -Eric Yang -Fabio Berchtold -Fabio Huser -farmerworking -Felix Yan -Florentin Raud -Frank Chen -Frederick F. Kautz IV -gabriell nascimento -Gleb Schukin -harche -Henri Gomez -Hua Wang -Hu Keping -HuKeping -Ian Babrou -igayoso -Jack Griffin -James Findley -Jason Freidman -Jason Heiss -Jeff Nickoloff -Jess Frazelle -Jessie Frazelle -jhaohai -Jianqing Wang -Jihoon Chung -Joao Fernandes -John Mulhausen -John Starks -Jonathan Boulle -Jon Johnson -Jon Poler -Jordan Liggitt -Josh Chorlton -Josh Hawn -Julien Fernandez -Keerthan Mala -Kelsey Hightower -Kenneth Lim -Kenny Leung -Ke Xu -liuchang0812 -Liu Hua -Li Yi -Lloyd Ramey -Louis Kottmann -Luke Carpenter -Marcus Martins -Mary Anthony -Matt Bentley -Matt Duch -Matthew Green -Matt Moore -Matt Robenolt -Michael Prokop -Michal Minar -Michal Minář -Mike Brown -Miquel Sabaté -Misty Stanley-Jones -Morgan Bauer -moxiegirl -Nathan Sullivan -nevermosby -Nghia Tran -Nikita Tarasov -Noah Treuhaft -Nuutti Kotivuori -Oilbeater -Olivier Gambier -Olivier Jacques -Omer Cohen -Patrick Devine -Phil Estes -Philip Misiowiec -Pierre-Yves Ritschard -Qiao Anran -Randy Barlow -Richard Scothern -Rodolfo Carvalho -Rusty Conover -Sean Boran -Sebastiaan van Stijn -Sebastien Coavoux -Serge Dubrouski -Sharif Nassar -Shawn Falkner-Horine -Shreyas Karnik -Simon Thulbourn -spacexnice -Spencer Rinehart -Stan Hu -Stefan Majewsky -Stefan Weil -Stephen J Day -Sungho Moon -Sven Dowideit -Sylvain Baubeau -Ted Reed -tgic -Thomas Sjögren -Tianon Gravi -Tibor Vass -Tonis Tiigi -Tony Holdstock-Brown -Trevor Pounds -Troels Thomsen -Victoria Bialas -Victor Vieux -Vincent Batts -Vincent Demeester -Vincent Giersch -weiyuan.yl -W. Trevor King -xg.song -xiekeyang -Yann ROBERT -yaoyao.xyy -yixi zhang -yuexiao-wang -yuzou -zhouhaibing089 -姜继忠 diff --git a/vendor/github.com/docker/distribution/README.md b/vendor/github.com/docker/distribution/README.md new file mode 100644 index 0000000..9988788 --- /dev/null +++ b/vendor/github.com/docker/distribution/README.md @@ -0,0 +1,130 @@ +# Distribution + +The Docker toolset to pack, ship, store, and deliver content. 
+
+This repository's main product is the Docker Registry 2.0 implementation
+for storing and distributing Docker images. It supersedes the
+[docker/docker-registry](https://github.com/docker/docker-registry)
+project with a new API design, focused on security and performance.
+
+
+
+[![Circle CI](https://circleci.com/gh/docker/distribution/tree/master.svg?style=svg)](https://circleci.com/gh/docker/distribution/tree/master)
+[![GoDoc](https://godoc.org/github.com/docker/distribution?status.svg)](https://godoc.org/github.com/docker/distribution)
+
+This repository contains the following components:
+
+| **Component**      | Description                                                                                                                                                                                          |
+|--------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| **registry**       | An implementation of the [Docker Registry HTTP API V2](docs/spec/api.md) for use with docker 1.6+.                                                                                                   |
+| **libraries**      | A rich set of libraries for interacting with distribution components. Please see [godoc](https://godoc.org/github.com/docker/distribution) for details. **Note**: These libraries are **unstable**. |
+| **specifications** | _Distribution_ related specifications are available in [docs/spec](docs/spec)                                                                                                                        |
+| **documentation**  | Docker's full documentation set is available at [docs.docker.com](https://docs.docker.com). This repository [contains the subset](docs/) related just to the registry.                              |
+
+### How does this integrate with Docker engine?
+
+This project should provide an implementation of a V2 API for use in the [Docker
+core project](https://github.com/docker/docker). The API should be embeddable
+and simplify the process of securely pulling and pushing content from `docker`
+daemons.
+
+### What are the long-term goals of the Distribution project?
+
+The _Distribution_ project has the further long-term goal of providing a
+secure tool chain for distributing content. The specifications, APIs and tools
+should be as useful with Docker as they are without.
+
+Our goal is to design a professional-grade and extensible content distribution
+system that allows users to:
+
+* Enjoy an efficient, secure, and reliable way to store, manage, package, and
+  exchange content
+* Hack/roll their own on top of healthy open-source components
+* Implement their own home-made solutions through good specs and solid
+  extension mechanisms.
+
+## More about Registry 2.0
+
+The new registry implementation provides the following benefits:
+
+- faster push and pull
+- new, more efficient implementation
+- simplified deployment
+- pluggable storage backend
+- webhook notifications
+
+For information on upcoming functionality, please see [ROADMAP.md](ROADMAP.md).
+
+### Who needs to deploy a registry?
+
+By default, Docker users pull images from Docker's public registry instance.
+[Installing Docker](https://docs.docker.com/engine/installation/) gives users this
+ability. Users can also push images to a repository on Docker's public registry,
+if they have a [Docker Hub](https://hub.docker.com/) account.
+
+For some users and even companies, this default behavior is sufficient. For
+others, it is not.
+
+For example, users with their own software products may want to maintain a
+registry for private, company images. Also, you may wish to deploy your own
+image repository for images used in testing or continuous integration. For
+these use cases and others, [deploying your own registry instance](https://github.com/docker/docker.github.io/blob/master/registry/deploying.md)
+may be the better choice.
+
+### Migration to Registry 2.0
+
+For those who have previously deployed their own registry based on the Registry
+1.0 implementation and wish to deploy a Registry 2.0 while retaining images,
+data migration is required. A tool to assist with migration efforts has been
+created. For more information see [docker/migrator](https://github.com/docker/migrator).
+
+## Contribute
+
+Please see [CONTRIBUTING.md](CONTRIBUTING.md) for details on how to contribute
+issues, fixes, and patches to this project. If you are contributing code, see
+the instructions for [building a development environment](BUILDING.md).
+
+## Support
+
+If any issues are encountered while using the _Distribution_ project, several
+avenues are available for support:
+
+| Channel       | Where                                                                     |
+|---------------|---------------------------------------------------------------------------|
+| IRC           | #docker-distribution on FreeNode                                          |
+| Issue Tracker | github.com/docker/distribution/issues                                     |
+| Google Groups | https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution  |
+| Mailing List  | docker@dockerproject.org                                                  |
+
+
+## License
+
+This project is distributed under [Apache License, Version 2.0](LICENSE).
diff --git a/vendor/github.com/docker/distribution/blobs.go b/vendor/github.com/docker/distribution/blobs.go
new file mode 100644
index 0000000..145b078
--- /dev/null
+++ b/vendor/github.com/docker/distribution/blobs.go
@@ -0,0 +1,257 @@
+package distribution
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"time"
+
+	"github.com/docker/distribution/reference"
+	"github.com/opencontainers/go-digest"
+)
+
+var (
+	// ErrBlobExists returned when blob already exists
+	ErrBlobExists = errors.New("blob exists")
+
+	// ErrBlobDigestUnsupported when blob digest is an unsupported version.
+	ErrBlobDigestUnsupported = errors.New("unsupported blob digest")
+
+	// ErrBlobUnknown when blob is not found.
+	ErrBlobUnknown = errors.New("unknown blob")
+
+	// ErrBlobUploadUnknown returned when upload is not found.
+	ErrBlobUploadUnknown = errors.New("blob upload unknown")
+
+	// ErrBlobInvalidLength returned when the blob has an expected length on
+	// commit, meaning mismatched with the descriptor or an invalid value.
+	ErrBlobInvalidLength = errors.New("blob invalid length")
+)
+
+// ErrBlobInvalidDigest returned when digest check fails.
+type ErrBlobInvalidDigest struct {
+	Digest digest.Digest
+	Reason error
+}
+
+func (err ErrBlobInvalidDigest) Error() string {
+	return fmt.Sprintf("invalid digest for referenced layer: %v, %v",
+		err.Digest, err.Reason)
+}
+
+// ErrBlobMounted returned when a blob is mounted from another repository
+// instead of initiating an upload session.
+type ErrBlobMounted struct {
+	From       reference.Canonical
+	Descriptor Descriptor
+}
+
+func (err ErrBlobMounted) Error() string {
+	return fmt.Sprintf("blob mounted from: %v to: %v",
+		err.From, err.Descriptor)
+}
+
+// Descriptor describes targeted content. Used in conjunction with a blob
+// store, a descriptor can be used to fetch, store and target any kind of
+// blob. The struct also describes the wire protocol format. Fields should
+// only be added but never changed.
+type Descriptor struct {
+	// MediaType describes the type of the content. All text based formats are
+	// encoded as utf-8.
+	MediaType string `json:"mediaType,omitempty"`
+
+	// Size in bytes of content.
+	Size int64 `json:"size,omitempty"`
+
+	// Digest uniquely identifies the content. A byte stream can be verified
+	// against this digest.
+	Digest digest.Digest `json:"digest,omitempty"`
+
+	// URLs contains the source URLs of this content.
+	URLs []string `json:"urls,omitempty"`
+
+	// NOTE: Before adding a field here, please ensure that all
+	// other options have been exhausted. Many of the type relationships
+	// depend on the simplicity of this type.
+}
+
+// Descriptor returns the descriptor, to make it satisfy the Describable
+// interface. Note that implementations of Describable are generally objects
+// which can be described, not simply descriptors; this exception is in place
+// to make it more convenient to pass actual descriptors to functions that
+// expect Describable objects.
+func (d Descriptor) Descriptor() Descriptor {
+	return d
+}
+
+// BlobStatter makes blob descriptors available by digest. The service may
+// provide a descriptor of a different digest if the provided digest is not
+// canonical.
+type BlobStatter interface {
+	// Stat provides metadata about a blob identified by the digest. If the
+	// blob is unknown to the describer, ErrBlobUnknown will be returned.
+	Stat(ctx context.Context, dgst digest.Digest) (Descriptor, error)
+}
+
+// BlobDeleter enables deleting blobs from storage.
+type BlobDeleter interface {
+	Delete(ctx context.Context, dgst digest.Digest) error
+}
+
+// BlobEnumerator enables iterating over blobs from storage
+type BlobEnumerator interface {
+	Enumerate(ctx context.Context, ingester func(dgst digest.Digest) error) error
+}
+
+// BlobDescriptorService manages metadata about a blob by digest. Most
+// implementations will not expose such an interface explicitly. Such mappings
+// should be maintained by interacting with the BlobIngester. Hence, this is
+// left off of BlobService and BlobStore.
+type BlobDescriptorService interface {
+	BlobStatter
+
+	// SetDescriptor assigns the descriptor to the digest. The provided digest and
+	// the digest in the descriptor must map to identical content but they may
+	// differ on their algorithm. The descriptor must have the canonical
+	// digest of the content and the digest algorithm must match the
+	// annotator's canonical algorithm.
+	//
+	// Such a facility can be used to map blobs between digest domains, with
+	// the restriction that the algorithm of the descriptor must match the
+	// canonical algorithm (i.e. sha256) of the annotator.
+	SetDescriptor(ctx context.Context, dgst digest.Digest, desc Descriptor) error
+
+	// Clear enables descriptors to be unlinked
+	Clear(ctx context.Context, dgst digest.Digest) error
+}
+
+// BlobDescriptorServiceFactory creates middleware for BlobDescriptorService.
+type BlobDescriptorServiceFactory interface {
+	BlobAccessController(svc BlobDescriptorService) BlobDescriptorService
+}
+
+// ReadSeekCloser is the primary reader type for blob data, combining
+// io.ReadSeeker with io.Closer.
+type ReadSeekCloser interface {
+	io.ReadSeeker
+	io.Closer
+}
+
+// BlobProvider describes operations for getting blob data.
+type BlobProvider interface {
+	// Get returns the entire blob identified by digest along with the descriptor.
+	Get(ctx context.Context, dgst digest.Digest) ([]byte, error)
+
+	// Open provides a ReadSeekCloser to the blob identified by the provided
+	// descriptor. If the blob is not known to the service, an error will be
+	// returned.
+	Open(ctx context.Context, dgst digest.Digest) (ReadSeekCloser, error)
+}
+
+// BlobServer can serve blobs via http.
+type BlobServer interface {
+	// ServeBlob attempts to serve the blob, identified by dgst, via http. The
+	// service may decide to redirect the client elsewhere or serve the data
+	// directly.
+	//
+	// This handler only issues successful responses, such as 2xx or 3xx,
+	// meaning it serves data or issues a redirect. If the blob is not
+	// available, an error will be returned and the caller may still issue a
+	// response.
+	//
+	// The implementation may serve the same blob from a different digest
+	// domain. The appropriate headers will be set for the blob, unless they
+	// have already been set by the caller.
+	ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error
+}
+
+// BlobIngester ingests blob data.
+type BlobIngester interface {
+	// Put inserts the content p into the blob service, returning a descriptor
+	// or an error.
+	Put(ctx context.Context, mediaType string, p []byte) (Descriptor, error)
+
+	// Create allocates a new blob writer to add a blob to this service. The
+	// returned handle can be written to and later resumed using an opaque
+	// identifier. With this approach, one can Close and Resume a BlobWriter
+	// multiple times until the BlobWriter is committed or cancelled.
+	Create(ctx context.Context, options ...BlobCreateOption) (BlobWriter, error)
+
+	// Resume attempts to resume a write to a blob, identified by an id.
+	Resume(ctx context.Context, id string) (BlobWriter, error)
+}
+
+// BlobCreateOption is a general extensible function argument for blob creation
+// methods. A BlobIngester may choose to honor any or none of the given
+// BlobCreateOptions, which can be specific to the implementation of the
+// BlobIngester receiving them.
+// TODO (brianbland): unify this with ManifestServiceOption in the future
+type BlobCreateOption interface {
+	Apply(interface{}) error
+}
+
+// CreateOptions is a collection of blob creation modifiers relevant to general
+// blob storage intended to be configured by the BlobCreateOption.Apply method.
+type CreateOptions struct {
+	Mount struct {
+		ShouldMount bool
+		From        reference.Canonical
+		// Stat allows a precalculated descriptor to be passed to link and return.
+		// Blob access check will be skipped if set.
+		Stat *Descriptor
+	}
+}
+
+// BlobWriter provides a handle for inserting data into a blob store.
+// Instances should be obtained from BlobWriteService.Writer and
+// BlobWriteService.Resume. If supported by the store, a writer can be
+// recovered with the id.
+type BlobWriter interface {
+	io.WriteCloser
+	io.ReaderFrom
+
+	// Size returns the number of bytes written to this blob.
+	Size() int64
+
+	// ID returns the identifier for this writer. The ID can be used with the
+	// Blob service to later resume the write.
+	ID() string
+
+	// StartedAt returns the time this blob write was started.
+	StartedAt() time.Time
+
+	// Commit completes the blob writer process. The content is verified
+	// against the provided provisional descriptor, which may result in an
+	// error. Depending on the implementation, written data may be validated
+	// against the provisional descriptor fields. If MediaType is not present,
+	// the implementation may reject the commit or assign "application/octet-
+	// stream" to the blob. The returned descriptor may have a different
+	// digest depending on the blob store, referred to as the canonical
+	// descriptor.
+	Commit(ctx context.Context, provisional Descriptor) (canonical Descriptor, err error)
+
+	// Cancel ends the blob write without storing any data and frees any
+	// associated resources. Any data written thus far will be lost. Cancel
+	// implementations should allow multiple calls even after a commit that
+	// result in a no-op. This allows use of Cancel in a defer statement,
+	// increasing the assurance that it is correctly called.
+	Cancel(ctx context.Context) error
+}
+
+// BlobService combines the operations to access, read and write blobs. This
+// can be used to describe remote blob services.
+type BlobService interface {
+	BlobStatter
+	BlobProvider
+	BlobIngester
+}
+
+// BlobStore represents the entire suite of blob related operations. Such an
+// implementation can access, read, write, delete and serve blobs.
+type BlobStore interface { + BlobService + BlobServer + BlobDeleter +} diff --git a/vendor/github.com/docker/distribution/digest/set.go b/vendor/github.com/docker/distribution/digestset/set.go similarity index 90% rename from vendor/github.com/docker/distribution/digest/set.go rename to vendor/github.com/docker/distribution/digestset/set.go index 4b9313c..71327dc 100644 --- a/vendor/github.com/docker/distribution/digest/set.go +++ b/vendor/github.com/docker/distribution/digestset/set.go @@ -1,10 +1,12 @@ -package digest +package digestset import ( "errors" "sort" "strings" "sync" + + digest "github.com/opencontainers/go-digest" ) var ( @@ -44,7 +46,7 @@ func NewSet() *Set { // values or short values. This function does not test equality, // rather whether the second value could match against the first // value. -func checkShortMatch(alg Algorithm, hex, shortAlg, shortHex string) bool { +func checkShortMatch(alg digest.Algorithm, hex, shortAlg, shortHex string) bool { if len(hex) == len(shortHex) { if hex != shortHex { return false @@ -64,7 +66,7 @@ func checkShortMatch(alg Algorithm, hex, shortAlg, shortHex string) bool { // If no digests could be found ErrDigestNotFound will be returned // with an empty digest value. If multiple matches are found // ErrDigestAmbiguous will be returned with an empty digest value. -func (dst *Set) Lookup(d string) (Digest, error) { +func (dst *Set) Lookup(d string) (digest.Digest, error) { dst.mutex.RLock() defer dst.mutex.RUnlock() if len(dst.entries) == 0 { @@ -72,11 +74,11 @@ func (dst *Set) Lookup(d string) (Digest, error) { } var ( searchFunc func(int) bool - alg Algorithm + alg digest.Algorithm hex string ) - dgst, err := ParseDigest(d) - if err == ErrDigestInvalidFormat { + dgst, err := digest.Parse(d) + if err == digest.ErrDigestInvalidFormat { hex = d searchFunc = func(i int) bool { return dst.entries[i].val >= d @@ -108,7 +110,7 @@ func (dst *Set) Lookup(d string) (Digest, error) { // Add adds the given digest to the set. An error will be returned // if the given digest is invalid. If the digest already exists in the // set, this operation will be a no-op. -func (dst *Set) Add(d Digest) error { +func (dst *Set) Add(d digest.Digest) error { if err := d.Validate(); err != nil { return err } @@ -139,7 +141,7 @@ func (dst *Set) Add(d Digest) error { // Remove removes the given digest from the set. An err will be // returned if the given digest is invalid. If the digest does // not exist in the set, this operation will be a no-op. -func (dst *Set) Remove(d Digest) error { +func (dst *Set) Remove(d digest.Digest) error { if err := d.Validate(); err != nil { return err } @@ -167,10 +169,10 @@ func (dst *Set) Remove(d Digest) error { } // All returns all the digests in the set -func (dst *Set) All() []Digest { +func (dst *Set) All() []digest.Digest { dst.mutex.RLock() defer dst.mutex.RUnlock() - retValues := make([]Digest, len(dst.entries)) + retValues := make([]digest.Digest, len(dst.entries)) for i := range dst.entries { retValues[i] = dst.entries[i].digest } @@ -183,10 +185,10 @@ func (dst *Set) All() []Digest { // entire value of digest if uniqueness cannot be achieved without the // full value. This function will attempt to make short codes as short // as possible to be unique. 
-func ShortCodeTable(dst *Set, length int) map[Digest]string { +func ShortCodeTable(dst *Set, length int) map[digest.Digest]string { dst.mutex.RLock() defer dst.mutex.RUnlock() - m := make(map[Digest]string, len(dst.entries)) + m := make(map[digest.Digest]string, len(dst.entries)) l := length resetIdx := 0 for i := 0; i < len(dst.entries); i++ { @@ -222,9 +224,9 @@ func ShortCodeTable(dst *Set, length int) map[Digest]string { } type digestEntry struct { - alg Algorithm + alg digest.Algorithm val string - digest Digest + digest digest.Digest } type digestEntries []*digestEntry diff --git a/vendor/github.com/docker/distribution/doc.go b/vendor/github.com/docker/distribution/doc.go new file mode 100644 index 0000000..bdd8cb7 --- /dev/null +++ b/vendor/github.com/docker/distribution/doc.go @@ -0,0 +1,7 @@ +// Package distribution will define the interfaces for the components of +// docker distribution. The goal is to allow users to reliably package, ship +// and store content related to docker images. +// +// This is currently a work in progress. More details are available in the +// README.md. +package distribution diff --git a/vendor/github.com/docker/distribution/errors.go b/vendor/github.com/docker/distribution/errors.go new file mode 100644 index 0000000..020d332 --- /dev/null +++ b/vendor/github.com/docker/distribution/errors.go @@ -0,0 +1,115 @@ +package distribution + +import ( + "errors" + "fmt" + "strings" + + "github.com/opencontainers/go-digest" +) + +// ErrAccessDenied is returned when an access to a requested resource is +// denied. +var ErrAccessDenied = errors.New("access denied") + +// ErrManifestNotModified is returned when a conditional manifest GetByTag +// returns nil due to the client indicating it has the latest version +var ErrManifestNotModified = errors.New("manifest not modified") + +// ErrUnsupported is returned when an unimplemented or unsupported action is +// performed +var ErrUnsupported = errors.New("operation unsupported") + +// ErrTagUnknown is returned if the given tag is not known by the tag service +type ErrTagUnknown struct { + Tag string +} + +func (err ErrTagUnknown) Error() string { + return fmt.Sprintf("unknown tag=%s", err.Tag) +} + +// ErrRepositoryUnknown is returned if the named repository is not known by +// the registry. +type ErrRepositoryUnknown struct { + Name string +} + +func (err ErrRepositoryUnknown) Error() string { + return fmt.Sprintf("unknown repository name=%s", err.Name) +} + +// ErrRepositoryNameInvalid should be used to denote an invalid repository +// name. Reason may set, indicating the cause of invalidity. +type ErrRepositoryNameInvalid struct { + Name string + Reason error +} + +func (err ErrRepositoryNameInvalid) Error() string { + return fmt.Sprintf("repository name %q invalid: %v", err.Name, err.Reason) +} + +// ErrManifestUnknown is returned if the manifest is not known by the +// registry. +type ErrManifestUnknown struct { + Name string + Tag string +} + +func (err ErrManifestUnknown) Error() string { + return fmt.Sprintf("unknown manifest name=%s tag=%s", err.Name, err.Tag) +} + +// ErrManifestUnknownRevision is returned when a manifest cannot be found by +// revision within a repository. +type ErrManifestUnknownRevision struct { + Name string + Revision digest.Digest +} + +func (err ErrManifestUnknownRevision) Error() string { + return fmt.Sprintf("unknown manifest name=%s revision=%s", err.Name, err.Revision) +} + +// ErrManifestUnverified is returned when the registry is unable to verify +// the manifest. 
+type ErrManifestUnverified struct{}
+
+func (ErrManifestUnverified) Error() string {
+	return "unverified manifest"
+}
+
+// ErrManifestVerification provides a type to collect errors encountered
+// during manifest verification. Currently, it accepts errors of all types,
+// but it may be narrowed to those involving manifest verification.
+type ErrManifestVerification []error
+
+func (errs ErrManifestVerification) Error() string {
+	var parts []string
+	for _, err := range errs {
+		parts = append(parts, err.Error())
+	}
+
+	return fmt.Sprintf("errors verifying manifest: %v", strings.Join(parts, ","))
+}
+
+// ErrManifestBlobUnknown returned when a referenced blob cannot be found.
+type ErrManifestBlobUnknown struct {
+	Digest digest.Digest
+}
+
+func (err ErrManifestBlobUnknown) Error() string {
+	return fmt.Sprintf("unknown blob %v on manifest", err.Digest)
+}
+
+// ErrManifestNameInvalid should be used to denote an invalid manifest
+// name. Reason may be set, indicating the cause of invalidity.
+type ErrManifestNameInvalid struct {
+	Name   string
+	Reason error
+}
+
+func (err ErrManifestNameInvalid) Error() string {
+	return fmt.Sprintf("manifest name %q invalid: %v", err.Name, err.Reason)
+}
diff --git a/vendor/github.com/docker/distribution/manifests.go b/vendor/github.com/docker/distribution/manifests.go
new file mode 100644
index 0000000..1816bae
--- /dev/null
+++ b/vendor/github.com/docker/distribution/manifests.go
@@ -0,0 +1,125 @@
+package distribution
+
+import (
+	"context"
+	"fmt"
+	"mime"
+
+	"github.com/opencontainers/go-digest"
+)
+
+// Manifest represents a registry object specifying a set of
+// references and an optional target
+type Manifest interface {
+	// References returns a list of objects which make up this manifest.
+	// A reference is anything which can be represented by a
+	// distribution.Descriptor. These can consist of layers, resources or other
+	// manifests.
+	//
+	// While no particular order is required, implementations should return
+	// them from highest to lowest priority. For example, one might want to
+	// return the base layer before the top layer.
+	References() []Descriptor
+
+	// Payload provides the serialized format of the manifest, in addition to
+	// the media type.
+	Payload() (mediaType string, payload []byte, err error)
+}
+
+// ManifestBuilder creates a manifest allowing one to include dependencies.
+// Instances can be obtained from a version-specific manifest package. Manifest
+// specific data is passed into the function which creates the builder.
+type ManifestBuilder interface {
+	// Build creates the manifest from this builder.
+	Build(ctx context.Context) (Manifest, error)
+
+	// References returns a list of objects which have been added to this
+	// builder. The dependencies are returned in the order they were added,
+	// which should be from base to head.
+	References() []Descriptor
+
+	// AppendReference includes the given object in the manifest after any
+	// existing dependencies. If the add fails, such as when adding an
+	// unsupported dependency, an error may be returned.
+	//
+	// The destination of the reference is dependent on the manifest type and
+	// the dependency type.
+	AppendReference(dependency Describable) error
+}
+
+// ManifestService describes operations on image manifests.
+type ManifestService interface {
+	// Exists returns true if the manifest exists.
+	Exists(ctx context.Context, dgst digest.Digest) (bool, error)
+
+	// Get retrieves the manifest specified by the given digest
+	Get(ctx context.Context, dgst digest.Digest, options ...ManifestServiceOption) (Manifest, error)
+
+	// Put creates or updates the given manifest returning the manifest digest
+	Put(ctx context.Context, manifest Manifest, options ...ManifestServiceOption) (digest.Digest, error)
+
+	// Delete removes the manifest specified by the given digest. Deleting
+	// a manifest that doesn't exist will return ErrManifestNotFound
+	Delete(ctx context.Context, dgst digest.Digest) error
+}
+
+// ManifestEnumerator enables iterating over manifests
+type ManifestEnumerator interface {
+	// Enumerate calls ingester for each manifest.
+	Enumerate(ctx context.Context, ingester func(digest.Digest) error) error
+}
+
+// Describable is an interface for descriptors
+type Describable interface {
+	Descriptor() Descriptor
+}
+
+// ManifestMediaTypes returns the supported media types for manifests.
+func ManifestMediaTypes() (mediaTypes []string) {
+	for t := range mappings {
+		if t != "" {
+			mediaTypes = append(mediaTypes, t)
+		}
+	}
+	return
+}
+
+// UnmarshalFunc implements manifest unmarshalling for a given MediaType
+type UnmarshalFunc func([]byte) (Manifest, Descriptor, error)
+
+var mappings = make(map[string]UnmarshalFunc, 0)
+
+// UnmarshalManifest looks up manifest unmarshal functions based on
+// MediaType
+func UnmarshalManifest(ctHeader string, p []byte) (Manifest, Descriptor, error) {
+	// Need to look up by the actual media type, not the raw contents of
+	// the header. Strip semicolons and anything following them.
+	var mediaType string
+	if ctHeader != "" {
+		var err error
+		mediaType, _, err = mime.ParseMediaType(ctHeader)
+		if err != nil {
+			return nil, Descriptor{}, err
+		}
+	}
+
+	unmarshalFunc, ok := mappings[mediaType]
+	if !ok {
+		unmarshalFunc, ok = mappings[""]
+		if !ok {
+			return nil, Descriptor{}, fmt.Errorf("unsupported manifest media type and no default available: %s", mediaType)
+		}
+	}
+
+	return unmarshalFunc(p)
+}
+
+// RegisterManifestSchema registers an UnmarshalFunc for a given schema type. This
+// should be called from specific manifest packages.
+func RegisterManifestSchema(mediaType string, u UnmarshalFunc) error {
+	if _, ok := mappings[mediaType]; ok {
+		return fmt.Errorf("manifest media type registration would overwrite existing: %s", mediaType)
+	}
+	mappings[mediaType] = u
+	return nil
+}
diff --git a/vendor/github.com/docker/distribution/reference/helpers.go b/vendor/github.com/docker/distribution/reference/helpers.go
new file mode 100644
index 0000000..978df7e
--- /dev/null
+++ b/vendor/github.com/docker/distribution/reference/helpers.go
@@ -0,0 +1,42 @@
+package reference
+
+import "path"
+
+// IsNameOnly returns true if reference only contains a repo name.
+func IsNameOnly(ref Named) bool {
+	if _, ok := ref.(NamedTagged); ok {
+		return false
+	}
+	if _, ok := ref.(Canonical); ok {
+		return false
+	}
+	return true
+}
+
+// FamiliarName returns the familiar name string
+// for the given named, familiarizing if needed.
+func FamiliarName(ref Named) string {
+	if nn, ok := ref.(normalizedNamed); ok {
+		return nn.Familiar().Name()
+	}
+	return ref.Name()
+}
+
+// FamiliarString returns the familiar string representation
+// for the given reference, familiarizing if needed.
+func FamiliarString(ref Reference) string {
+	if nn, ok := ref.(normalizedNamed); ok {
+		return nn.Familiar().String()
+	}
+	return ref.String()
+}
+
+// FamiliarMatch reports whether ref matches the specified pattern.
+// See https://godoc.org/path#Match for supported patterns.
+func FamiliarMatch(pattern string, ref Reference) (bool, error) {
+	matched, err := path.Match(pattern, FamiliarString(ref))
+	if namedRef, isNamed := ref.(Named); isNamed && !matched {
+		matched, _ = path.Match(pattern, FamiliarName(namedRef))
+	}
+	return matched, err
+}
diff --git a/vendor/github.com/docker/distribution/reference/normalize.go b/vendor/github.com/docker/distribution/reference/normalize.go
new file mode 100644
index 0000000..2d71fc5
--- /dev/null
+++ b/vendor/github.com/docker/distribution/reference/normalize.go
@@ -0,0 +1,170 @@
+package reference
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+
+	"github.com/docker/distribution/digestset"
+	"github.com/opencontainers/go-digest"
+)
+
+var (
+	legacyDefaultDomain = "index.docker.io"
+	defaultDomain       = "docker.io"
+	officialRepoName    = "library"
+	defaultTag          = "latest"
+)
+
+// normalizedNamed represents a name which has been
+// normalized and has a familiar form. A familiar name
+// is what is used in Docker UI. An example normalized
+// name is "docker.io/library/ubuntu" and corresponding
+// familiar name of "ubuntu".
+type normalizedNamed interface {
+	Named
+	Familiar() Named
+}
+
+// ParseNormalizedNamed parses a string into a named reference
+// transforming a familiar name from Docker UI to a fully
+// qualified reference. If the value may be an identifier
+// use ParseAnyReference.
+func ParseNormalizedNamed(s string) (Named, error) {
+	if ok := anchoredIdentifierRegexp.MatchString(s); ok {
+		return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s)
+	}
+	domain, remainder := splitDockerDomain(s)
+	var remoteName string
+	if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 {
+		remoteName = remainder[:tagSep]
+	} else {
+		remoteName = remainder
+	}
+	if strings.ToLower(remoteName) != remoteName {
+		return nil, errors.New("invalid reference format: repository name must be lowercase")
+	}
+
+	ref, err := Parse(domain + "/" + remainder)
+	if err != nil {
+		return nil, err
+	}
+	named, isNamed := ref.(Named)
+	if !isNamed {
+		return nil, fmt.Errorf("reference %s has no name", ref.String())
+	}
+	return named, nil
+}
+
+// splitDockerDomain splits a repository name to domain and remotename string.
+// If no valid domain is found, the default domain is used. Repository name
+// needs to be already validated before.
+func splitDockerDomain(name string) (domain, remainder string) {
+	i := strings.IndexRune(name, '/')
+	if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") {
+		domain, remainder = defaultDomain, name
+	} else {
+		domain, remainder = name[:i], name[i+1:]
+	}
+	if domain == legacyDefaultDomain {
+		domain = defaultDomain
+	}
+	if domain == defaultDomain && !strings.ContainsRune(remainder, '/') {
+		remainder = officialRepoName + "/" + remainder
+	}
+	return
+}
+
+// familiarizeName returns a shortened version of the name familiar
+// to the Docker UI. Familiar names have the default domain
+// "docker.io" and "library/" repository prefix removed.
+// For example, "docker.io/library/redis" will have the familiar
+// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp".
+// Returns a familiarized named only reference.
+func familiarizeName(named namedRepository) repository { + repo := repository{ + domain: named.Domain(), + path: named.Path(), + } + + if repo.domain == defaultDomain { + repo.domain = "" + // Handle official repositories which have the pattern "library/" + if split := strings.Split(repo.path, "/"); len(split) == 2 && split[0] == officialRepoName { + repo.path = split[1] + } + } + return repo +} + +func (r reference) Familiar() Named { + return reference{ + namedRepository: familiarizeName(r.namedRepository), + tag: r.tag, + digest: r.digest, + } +} + +func (r repository) Familiar() Named { + return familiarizeName(r) +} + +func (t taggedReference) Familiar() Named { + return taggedReference{ + namedRepository: familiarizeName(t.namedRepository), + tag: t.tag, + } +} + +func (c canonicalReference) Familiar() Named { + return canonicalReference{ + namedRepository: familiarizeName(c.namedRepository), + digest: c.digest, + } +} + +// TagNameOnly adds the default tag "latest" to a reference if it only has +// a repo name. +func TagNameOnly(ref Named) Named { + if IsNameOnly(ref) { + namedTagged, err := WithTag(ref, defaultTag) + if err != nil { + // Default tag must be valid, to create a NamedTagged + // type with non-validated input the WithTag function + // should be used instead + panic(err) + } + return namedTagged + } + return ref +} + +// ParseAnyReference parses a reference string as a possible identifier, +// full digest, or familiar name. +func ParseAnyReference(ref string) (Reference, error) { + if ok := anchoredIdentifierRegexp.MatchString(ref); ok { + return digestReference("sha256:" + ref), nil + } + if dgst, err := digest.Parse(ref); err == nil { + return digestReference(dgst), nil + } + + return ParseNormalizedNamed(ref) +} + +// ParseAnyReferenceWithSet parses a reference string as a possible short +// identifier to be matched in a digest set, a full digest, or familiar name. +func ParseAnyReferenceWithSet(ref string, ds *digestset.Set) (Reference, error) { + if ok := anchoredShortIdentifierRegexp.MatchString(ref); ok { + dgst, err := ds.Lookup(ref) + if err == nil { + return digestReference(dgst), nil + } + } else { + if dgst, err := digest.Parse(ref); err == nil { + return digestReference(dgst), nil + } + } + + return ParseNormalizedNamed(ref) +} diff --git a/vendor/github.com/docker/distribution/reference/reference.go b/vendor/github.com/docker/distribution/reference/reference.go index 0278662..2f66cca 100644 --- a/vendor/github.com/docker/distribution/reference/reference.go +++ b/vendor/github.com/docker/distribution/reference/reference.go @@ -4,30 +4,32 @@ // Grammar // // reference := name [ ":" tag ] [ "@" digest ] -// name := [hostname '/'] component ['/' component]* -// hostname := hostcomponent ['.' hostcomponent]* [':' port-number] -// hostcomponent := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/ +// name := [domain '/'] path-component ['/' path-component]* +// domain := domain-component ['.' 
domain-component]* [':' port-number] +// domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/ // port-number := /[0-9]+/ -// component := alpha-numeric [separator alpha-numeric]* +// path-component := alpha-numeric [separator alpha-numeric]* // alpha-numeric := /[a-z0-9]+/ // separator := /[_.]|__|[-]*/ // // tag := /[\w][\w.-]{0,127}/ // // digest := digest-algorithm ":" digest-hex -// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ] +// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]* // digest-algorithm-separator := /[+.-_]/ // digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/ // digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value +// +// identifier := /[a-f0-9]{64}/ +// short-identifier := /[a-f0-9]{6,64}/ package reference import ( "errors" "fmt" - "path" "strings" - "github.com/docker/distribution/digest" + "github.com/opencontainers/go-digest" ) const ( @@ -53,6 +55,9 @@ var ( // ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax. ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax) + + // ErrNameNotCanonical is returned when a name is not canonical. + ErrNameNotCanonical = errors.New("repository name must be canonical") ) // Reference is an opaque object reference identifier that may include @@ -126,23 +131,56 @@ type Digested interface { } // Canonical reference is an object with a fully unique -// name including a name with hostname and digest +// name including a name with domain and digest type Canonical interface { Named Digest() digest.Digest } +// namedRepository is a reference to a repository with a name. +// A namedRepository has both domain and path components. +type namedRepository interface { + Named + Domain() string + Path() string +} + +// Domain returns the domain part of the Named reference +func Domain(named Named) string { + if r, ok := named.(namedRepository); ok { + return r.Domain() + } + domain, _ := splitDomain(named.Name()) + return domain +} + +// Path returns the name without the domain part of the Named reference +func Path(named Named) (name string) { + if r, ok := named.(namedRepository); ok { + return r.Path() + } + _, path := splitDomain(named.Name()) + return path +} + +func splitDomain(name string) (string, string) { + match := anchoredNameRegexp.FindStringSubmatch(name) + if len(match) != 3 { + return "", name + } + return match[1], match[2] +} + // SplitHostname splits a named reference into a // hostname and name string. If no valid hostname is // found, the hostname is empty and the full value // is returned as name +// DEPRECATED: Use Domain or Path func SplitHostname(named Named) (string, string) { - name := named.Name() - match := anchoredNameRegexp.FindStringSubmatch(name) - if len(match) != 3 { - return "", name + if r, ok := named.(namedRepository); ok { + return r.Domain(), r.Path() } - return match[1], match[2] + return splitDomain(named.Name()) } // Parse parses s and returns a syntactically valid Reference. 
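A minimal usage sketch (not part of the diff) may help make the split between normalized and familiar forms concrete. It combines ParseNormalizedNamed from normalize.go with the Domain, Path, and FamiliarString helpers introduced above, and assumes the vendored package resolves at its canonical import path github.com/docker/distribution/reference:

package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	// "ubuntu:18.04" is a familiar name; normalization expands it to the
	// fully qualified "docker.io/library/ubuntu:18.04".
	named, err := reference.ParseNormalizedNamed("ubuntu:18.04")
	if err != nil {
		panic(err)
	}
	fmt.Println(named.String())                  // docker.io/library/ubuntu:18.04
	fmt.Println(reference.Domain(named))         // docker.io
	fmt.Println(reference.Path(named))           // library/ubuntu
	fmt.Println(reference.FamiliarString(named)) // ubuntu:18.04
}

Given a bare name such as "ubuntu", TagNameOnly (also added in normalize.go) would further expand it to "ubuntu:latest" before use.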
@@ -164,13 +202,24 @@ func Parse(s string) (Reference, error) { return nil, ErrNameTooLong } + var repo repository + + nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1]) + if nameMatch != nil && len(nameMatch) == 3 { + repo.domain = nameMatch[1] + repo.path = nameMatch[2] + } else { + repo.domain = "" + repo.path = matches[1] + } + ref := reference{ - name: matches[1], - tag: matches[2], + namedRepository: repo, + tag: matches[2], } if matches[3] != "" { var err error - ref.digest, err = digest.ParseDigest(matches[3]) + ref.digest, err = digest.Parse(matches[3]) if err != nil { return nil, err } @@ -185,18 +234,17 @@ func Parse(s string) (Reference, error) { } // ParseNamed parses s and returns a syntactically valid reference implementing -// the Named interface. The reference must have a name, otherwise an error is -// returned. +// the Named interface. The reference must have a name and be in the canonical +// form, otherwise an error is returned. // If an error was encountered it is returned, along with a nil Reference. // NOTE: ParseNamed will not handle short digests. func ParseNamed(s string) (Named, error) { - ref, err := Parse(s) + named, err := ParseNormalizedNamed(s) if err != nil { return nil, err } - named, isNamed := ref.(Named) - if !isNamed { - return nil, fmt.Errorf("reference %s has no name", ref.String()) + if named.String() != s { + return nil, ErrNameNotCanonical } return named, nil } @@ -207,10 +255,15 @@ func WithName(name string) (Named, error) { if len(name) > NameTotalLengthMax { return nil, ErrNameTooLong } - if !anchoredNameRegexp.MatchString(name) { + + match := anchoredNameRegexp.FindStringSubmatch(name) + if match == nil || len(match) != 3 { return nil, ErrReferenceInvalidFormat } - return repository(name), nil + return repository{ + domain: match[1], + path: match[2], + }, nil } // WithTag combines the name from "name" and the tag from "tag" to form a @@ -219,16 +272,23 @@ func WithTag(name Named, tag string) (NamedTagged, error) { if !anchoredTagRegexp.MatchString(tag) { return nil, ErrTagInvalidFormat } + var repo repository + if r, ok := name.(namedRepository); ok { + repo.domain = r.Domain() + repo.path = r.Path() + } else { + repo.path = name.Name() + } if canonical, ok := name.(Canonical); ok { return reference{ - name: name.Name(), - tag: tag, - digest: canonical.Digest(), + namedRepository: repo, + tag: tag, + digest: canonical.Digest(), }, nil } return taggedReference{ - name: name.Name(), - tag: tag, + namedRepository: repo, + tag: tag, }, nil } @@ -238,36 +298,37 @@ func WithDigest(name Named, digest digest.Digest) (Canonical, error) { if !anchoredDigestRegexp.MatchString(digest.String()) { return nil, ErrDigestInvalidFormat } + var repo repository + if r, ok := name.(namedRepository); ok { + repo.domain = r.Domain() + repo.path = r.Path() + } else { + repo.path = name.Name() + } if tagged, ok := name.(Tagged); ok { return reference{ - name: name.Name(), - tag: tagged.Tag(), - digest: digest, + namedRepository: repo, + tag: tagged.Tag(), + digest: digest, }, nil } return canonicalReference{ - name: name.Name(), - digest: digest, + namedRepository: repo, + digest: digest, }, nil } -// Match reports whether ref matches the specified pattern. -// See https://godoc.org/path#Match for supported patterns. 
-func Match(pattern string, ref Reference) (bool, error) { - matched, err := path.Match(pattern, ref.String()) - if namedRef, isNamed := ref.(Named); isNamed && !matched { - matched, _ = path.Match(pattern, namedRef.Name()) - } - return matched, err -} - // TrimNamed removes any tag or digest from the named reference. func TrimNamed(ref Named) Named { - return repository(ref.Name()) + domain, path := SplitHostname(ref) + return repository{ + domain: domain, + path: path, + } } func getBestReferenceType(ref reference) Reference { - if ref.name == "" { + if ref.Name() == "" { // Allow digest only references if ref.digest != "" { return digestReference(ref.digest) @@ -277,16 +338,16 @@ func getBestReferenceType(ref reference) Reference { if ref.tag == "" { if ref.digest != "" { return canonicalReference{ - name: ref.name, - digest: ref.digest, + namedRepository: ref.namedRepository, + digest: ref.digest, } } - return repository(ref.name) + return ref.namedRepository } if ref.digest == "" { return taggedReference{ - name: ref.name, - tag: ref.tag, + namedRepository: ref.namedRepository, + tag: ref.tag, } } @@ -294,17 +355,13 @@ func getBestReferenceType(ref reference) Reference { } type reference struct { - name string + namedRepository tag string digest digest.Digest } func (r reference) String() string { - return r.name + ":" + r.tag + "@" + r.digest.String() -} - -func (r reference) Name() string { - return r.name + return r.Name() + ":" + r.tag + "@" + r.digest.String() } func (r reference) Tag() string { @@ -315,20 +372,34 @@ func (r reference) Digest() digest.Digest { return r.digest } -type repository string +type repository struct { + domain string + path string +} func (r repository) String() string { - return string(r) + return r.Name() } func (r repository) Name() string { - return string(r) + if r.domain == "" { + return r.path + } + return r.domain + "/" + r.path +} + +func (r repository) Domain() string { + return r.domain +} + +func (r repository) Path() string { + return r.path } type digestReference digest.Digest func (d digestReference) String() string { - return d.String() + return digest.Digest(d).String() } func (d digestReference) Digest() digest.Digest { @@ -336,16 +407,12 @@ func (d digestReference) Digest() digest.Digest { } type taggedReference struct { - name string - tag string + namedRepository + tag string } func (t taggedReference) String() string { - return t.name + ":" + t.tag -} - -func (t taggedReference) Name() string { - return t.name + return t.Name() + ":" + t.tag } func (t taggedReference) Tag() string { @@ -353,16 +420,12 @@ func (t taggedReference) Tag() string { } type canonicalReference struct { - name string + namedRepository digest digest.Digest } func (c canonicalReference) String() string { - return c.name + "@" + c.digest.String() -} - -func (c canonicalReference) Name() string { - return c.name + return c.Name() + "@" + c.digest.String() } func (c canonicalReference) Digest() digest.Digest { diff --git a/vendor/github.com/docker/distribution/reference/regexp.go b/vendor/github.com/docker/distribution/reference/regexp.go index 9a7d366..7860349 100644 --- a/vendor/github.com/docker/distribution/reference/regexp.go +++ b/vendor/github.com/docker/distribution/reference/regexp.go @@ -19,18 +19,18 @@ var ( alphaNumericRegexp, optional(repeated(separatorRegexp, alphaNumericRegexp))) - // hostnameComponentRegexp restricts the registry hostname component of a - // repository name to start with a component as defined by hostnameRegexp + // 
domainComponentRegexp restricts the registry domain component of a + // repository name to start with a component as defined by DomainRegexp // and followed by an optional port. - hostnameComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`) + domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`) - // hostnameRegexp defines the structure of potential hostname components + // DomainRegexp defines the structure of potential domain components // that may be part of image names. This is purposely a subset of what is // allowed by DNS to ensure backwards compatibility with Docker image // names. - hostnameRegexp = expression( - hostnameComponentRegexp, - optional(repeated(literal(`.`), hostnameComponentRegexp)), + DomainRegexp = expression( + domainComponentRegexp, + optional(repeated(literal(`.`), domainComponentRegexp)), optional(literal(`:`), match(`[0-9]+`))) // TagRegexp matches valid tag names. From docker/docker:graph/tags.go. @@ -48,17 +48,17 @@ var ( anchoredDigestRegexp = anchored(DigestRegexp) // NameRegexp is the format for the name component of references. The - // regexp has capturing groups for the hostname and name part omitting + // regexp has capturing groups for the domain and name part omitting // the separating forward slash from either. NameRegexp = expression( - optional(hostnameRegexp, literal(`/`)), + optional(DomainRegexp, literal(`/`)), nameComponentRegexp, optional(repeated(literal(`/`), nameComponentRegexp))) // anchoredNameRegexp is used to parse a name value, capturing the - // hostname and trailing components. + // domain and trailing components. anchoredNameRegexp = anchored( - optional(capture(hostnameRegexp), literal(`/`)), + optional(capture(DomainRegexp), literal(`/`)), capture(nameComponentRegexp, optional(repeated(literal(`/`), nameComponentRegexp)))) @@ -68,6 +68,25 @@ var ( ReferenceRegexp = anchored(capture(NameRegexp), optional(literal(":"), capture(TagRegexp)), optional(literal("@"), capture(DigestRegexp))) + + // IdentifierRegexp is the format for string identifier used as a + // content addressable identifier using sha256. These identifiers + // are like digests without the algorithm, since sha256 is used. + IdentifierRegexp = match(`([a-f0-9]{64})`) + + // ShortIdentifierRegexp is the format used to represent a prefix + // of an identifier. A prefix may be used to match a sha256 identifier + // within a list of trusted identifiers. + ShortIdentifierRegexp = match(`([a-f0-9]{6,64})`) + + // anchoredIdentifierRegexp is used to check or match an + // identifier value, anchored at start and end of string. + anchoredIdentifierRegexp = anchored(IdentifierRegexp) + + // anchoredShortIdentifierRegexp is used to check if a value + // is a possible identifier prefix, anchored at start and end + // of string. + anchoredShortIdentifierRegexp = anchored(ShortIdentifierRegexp) ) // match compiles the string to a regular expression. diff --git a/vendor/github.com/docker/distribution/registry.go b/vendor/github.com/docker/distribution/registry.go new file mode 100644 index 0000000..c34207d --- /dev/null +++ b/vendor/github.com/docker/distribution/registry.go @@ -0,0 +1,98 @@ +package distribution + +import ( + "context" + + "github.com/docker/distribution/reference" +) + +// Scope defines the set of items that match a namespace. +type Scope interface { + // Contains returns true if the name belongs to the namespace. 
+ Contains(name string) bool +} + +type fullScope struct{} + +func (f fullScope) Contains(string) bool { + return true +} + +// GlobalScope represents the full namespace scope which contains +// all other scopes. +var GlobalScope = Scope(fullScope{}) + +// Namespace represents a collection of repositories, addressable by name. +// Generally, a namespace is backed by a set of one or more services, +// providing facilities such as registry access, trust, and indexing. +type Namespace interface { + // Scope describes the names that can be used with this Namespace. The + // global namespace will have a scope that matches all names. The scope + // effectively provides an identity for the namespace. + Scope() Scope + + // Repository should return a reference to the named repository. The + // registry may or may not have the repository but should always return a + // reference. + Repository(ctx context.Context, name reference.Named) (Repository, error) + + // Repositories fills 'repos' with a lexicographically sorted catalog of repositories + // up to the size of 'repos' and returns the value 'n' for the number of entries + // which were filled. 'last' contains an offset in the catalog, and 'err' will be + // set to io.EOF if there are no more entries to obtain. + Repositories(ctx context.Context, repos []string, last string) (n int, err error) + + // Blobs returns a blob enumerator to access all blobs + Blobs() BlobEnumerator + + // BlobStatter returns a BlobStatter to control + BlobStatter() BlobStatter +} + +// RepositoryEnumerator describes an operation to enumerate repositories +type RepositoryEnumerator interface { + Enumerate(ctx context.Context, ingester func(string) error) error +} + +// ManifestServiceOption is a function argument for Manifest Service methods +type ManifestServiceOption interface { + Apply(ManifestService) error +} + +// WithTag allows a tag to be passed into Put +func WithTag(tag string) ManifestServiceOption { + return WithTagOption{tag} +} + +// WithTagOption holds a tag +type WithTagOption struct{ Tag string } + +// Apply conforms to the ManifestServiceOption interface +func (o WithTagOption) Apply(m ManifestService) error { + // no implementation + return nil +} + +// Repository is a named collection of manifests and layers. +type Repository interface { + // Named returns the name of the repository. + Named() reference.Named + + // Manifests returns a reference to this repository's manifest service. + // with the supplied options applied. + Manifests(ctx context.Context, options ...ManifestServiceOption) (ManifestService, error) + + // Blobs returns a reference to this repository's blob service. + Blobs(ctx context.Context) BlobStore + + // TODO(stevvooe): The above BlobStore return can probably be relaxed to + // be a BlobService for use with clients. This will allow such + // implementations to avoid implementing ServeBlob. + + // Tags returns a reference to this repositories tag service + Tags(ctx context.Context) TagService +} + +// TODO(stevvooe): Must add close methods to all these. May want to change the +// way instances are created to better reflect internal dependency +// relationships. 
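The Scope and Namespace contracts above are small but abstract. The following is an illustrative sketch only, not a distribution API: it re-declares the Scope interface locally so it compiles standalone, and the hypothetical prefixScope type shows how a scope narrower than GlobalScope might limit a namespace to repositories under one prefix:

package main

import (
	"fmt"
	"strings"
)

// Scope mirrors the vendored interface so this sketch is self-contained.
type Scope interface {
	Contains(name string) bool
}

// prefixScope (hypothetical) matches only repository names under a fixed
// prefix, in contrast to GlobalScope, which contains every name.
type prefixScope string

func (p prefixScope) Contains(name string) bool {
	return strings.HasPrefix(name, string(p))
}

func main() {
	s := Scope(prefixScope("library/"))
	fmt.Println(s.Contains("library/ubuntu")) // true
	fmt.Println(s.Contains("dmcgowan/myapp")) // false
}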
diff --git a/vendor/github.com/docker/distribution/registry/api/errcode/errors.go b/vendor/github.com/docker/distribution/registry/api/errcode/errors.go
new file mode 100644
index 0000000..6d9bb4b
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/api/errcode/errors.go
@@ -0,0 +1,267 @@
+package errcode
+
+import (
+	"encoding/json"
+	"fmt"
+	"strings"
+)
+
+// ErrorCoder is the base interface for ErrorCode and Error, allowing
+// users of either to call ErrorCode to get the underlying code
+type ErrorCoder interface {
+	ErrorCode() ErrorCode
+}
+
+// ErrorCode represents the error type. The errors are serialized via strings
+// and the integer format may change and should *never* be exported.
+type ErrorCode int
+
+var _ error = ErrorCode(0)
+
+// ErrorCode just returns itself
+func (ec ErrorCode) ErrorCode() ErrorCode {
+	return ec
+}
+
+// Error returns the ID/Value
+func (ec ErrorCode) Error() string {
+	// NOTE(stevvooe): Cannot use message here since it may have unpopulated args.
+	return strings.ToLower(strings.Replace(ec.String(), "_", " ", -1))
+}
+
+// Descriptor returns the descriptor for the error code.
+func (ec ErrorCode) Descriptor() ErrorDescriptor {
+	d, ok := errorCodeToDescriptors[ec]
+
+	if !ok {
+		return ErrorCodeUnknown.Descriptor()
+	}
+
+	return d
+}
+
+// String returns the canonical identifier for this error code.
+func (ec ErrorCode) String() string {
+	return ec.Descriptor().Value
+}
+
+// Message returns the human-readable error message for this error code.
+func (ec ErrorCode) Message() string {
+	return ec.Descriptor().Message
+}
+
+// MarshalText encodes the receiver into UTF-8-encoded text and returns the
+// result.
+func (ec ErrorCode) MarshalText() (text []byte, err error) {
+	return []byte(ec.String()), nil
+}
+
+// UnmarshalText decodes the form generated by MarshalText.
+func (ec *ErrorCode) UnmarshalText(text []byte) error {
+	desc, ok := idToDescriptors[string(text)]
+
+	if !ok {
+		desc = ErrorCodeUnknown.Descriptor()
+	}
+
+	*ec = desc.Code
+
+	return nil
+}
+
+// WithMessage creates a new Error struct based on the passed-in info and
+// overrides the Message property.
+func (ec ErrorCode) WithMessage(message string) Error {
+	return Error{
+		Code:    ec,
+		Message: message,
+	}
+}
+
+// WithDetail creates a new Error struct based on the passed-in info and
+// sets the Detail property appropriately
+func (ec ErrorCode) WithDetail(detail interface{}) Error {
+	return Error{
+		Code:    ec,
+		Message: ec.Message(),
+	}.WithDetail(detail)
+}
+
+// WithArgs creates a new Error struct and sets the Args slice
+func (ec ErrorCode) WithArgs(args ...interface{}) Error {
+	return Error{
+		Code:    ec,
+		Message: ec.Message(),
+	}.WithArgs(args...)
+}
+
+// Error provides a wrapper around ErrorCode with extra Details provided.
+type Error struct {
+	Code    ErrorCode   `json:"code"`
+	Message string      `json:"message"`
+	Detail  interface{} `json:"detail,omitempty"`
+
+	// TODO(duglin): See if we need an "args" property so we can do the
+	// variable substitution right before showing the message to the user
+}
+
+var _ error = Error{}
+
+// ErrorCode returns the ID/Value of this Error
+func (e Error) ErrorCode() ErrorCode {
+	return e.Code
+}
+
+// Error returns a human readable representation of the error.
+func (e Error) Error() string {
+	return fmt.Sprintf("%s: %s", e.Code.Error(), e.Message)
+}
+
+// WithDetail will return a new Error, based on the current one, but with
+// some Detail info added
+func (e Error) WithDetail(detail interface{}) Error {
+	return Error{
+		Code:    e.Code,
+		Message: e.Message,
+		Detail:  detail,
+	}
+}
+
+// WithArgs uses the passed-in list of interface{} as the substitution
+// variables in the Error's Message string, but returns a new Error
+func (e Error) WithArgs(args ...interface{}) Error {
+	return Error{
+		Code:    e.Code,
+		Message: fmt.Sprintf(e.Code.Message(), args...),
+		Detail:  e.Detail,
+	}
+}
+
+// ErrorDescriptor provides relevant information about a given error code.
+type ErrorDescriptor struct {
+	// Code is the error code that this descriptor describes.
+	Code ErrorCode
+
+	// Value provides a unique, string key, often capitalized with
+	// underscores, to identify the error code. This value is used as the
+	// keyed value when serializing api errors.
+	Value string
+
+	// Message is a short, human readable description of the error condition
+	// included in API responses.
+	Message string
+
+	// Description provides a complete account of the error's purpose, suitable
+	// for use in documentation.
+	Description string
+
+	// HTTPStatusCode provides the http status code that is associated with
+	// this error condition.
+	HTTPStatusCode int
+}
+
+// ParseErrorCode returns the value by the string error code.
+// `ErrorCodeUnknown` will be returned if the error is not known.
+func ParseErrorCode(value string) ErrorCode {
+	ed, ok := idToDescriptors[value]
+	if ok {
+		return ed.Code
+	}
+
+	return ErrorCodeUnknown
+}
+
+// Errors provides the envelope for multiple errors and a few sugar methods
+// for use within the application.
+type Errors []error
+
+var _ error = Errors{}
+
+func (errs Errors) Error() string {
+	switch len(errs) {
+	case 0:
+		return ""
+	case 1:
+		return errs[0].Error()
+	default:
+		msg := "errors:\n"
+		for _, err := range errs {
+			msg += err.Error() + "\n"
+		}
+		return msg
+	}
+}
+
+// Len returns the current number of errors.
+func (errs Errors) Len() int { + return len(errs) +} + +// MarshalJSON converts slice of error, ErrorCode or Error into a +// slice of Error - then serializes +func (errs Errors) MarshalJSON() ([]byte, error) { + var tmpErrs struct { + Errors []Error `json:"errors,omitempty"` + } + + for _, daErr := range errs { + var err Error + + switch daErr.(type) { + case ErrorCode: + err = daErr.(ErrorCode).WithDetail(nil) + case Error: + err = daErr.(Error) + default: + err = ErrorCodeUnknown.WithDetail(daErr) + + } + + // If the Error struct was setup and they forgot to set the + // Message field (meaning its "") then grab it from the ErrCode + msg := err.Message + if msg == "" { + msg = err.Code.Message() + } + + tmpErrs.Errors = append(tmpErrs.Errors, Error{ + Code: err.Code, + Message: msg, + Detail: err.Detail, + }) + } + + return json.Marshal(tmpErrs) +} + +// UnmarshalJSON deserializes []Error and then converts it into slice of +// Error or ErrorCode +func (errs *Errors) UnmarshalJSON(data []byte) error { + var tmpErrs struct { + Errors []Error + } + + if err := json.Unmarshal(data, &tmpErrs); err != nil { + return err + } + + var newErrs Errors + for _, daErr := range tmpErrs.Errors { + // If Message is empty or exactly matches the Code's message string + // then just use the Code, no need for a full Error struct + if daErr.Detail == nil && (daErr.Message == "" || daErr.Message == daErr.Code.Message()) { + // Error's w/o details get converted to ErrorCode + newErrs = append(newErrs, daErr.Code) + } else { + // Error's w/ details are untouched + newErrs = append(newErrs, Error{ + Code: daErr.Code, + Message: daErr.Message, + Detail: daErr.Detail, + }) + } + } + + *errs = newErrs + return nil +} diff --git a/vendor/github.com/docker/distribution/registry/api/errcode/handler.go b/vendor/github.com/docker/distribution/registry/api/errcode/handler.go new file mode 100644 index 0000000..49a64a8 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/api/errcode/handler.go @@ -0,0 +1,44 @@ +package errcode + +import ( + "encoding/json" + "net/http" +) + +// ServeJSON attempts to serve the errcode in a JSON envelope. It marshals err +// and sets the content-type header to 'application/json'. It will handle +// ErrorCoder and Errors, and if necessary will create an envelope. +func ServeJSON(w http.ResponseWriter, err error) error { + w.Header().Set("Content-Type", "application/json; charset=utf-8") + var sc int + + switch errs := err.(type) { + case Errors: + if len(errs) < 1 { + break + } + + if err, ok := errs[0].(ErrorCoder); ok { + sc = err.ErrorCode().Descriptor().HTTPStatusCode + } + case ErrorCoder: + sc = errs.ErrorCode().Descriptor().HTTPStatusCode + err = Errors{err} // create an envelope. + default: + // We just have an unhandled error type, so just place in an envelope + // and move along. 
+ err = Errors{err} + } + + if sc == 0 { + sc = http.StatusInternalServerError + } + + w.WriteHeader(sc) + + if err := json.NewEncoder(w).Encode(err); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/docker/distribution/registry/api/errcode/register.go b/vendor/github.com/docker/distribution/registry/api/errcode/register.go new file mode 100644 index 0000000..d1e8826 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/api/errcode/register.go @@ -0,0 +1,138 @@ +package errcode + +import ( + "fmt" + "net/http" + "sort" + "sync" +) + +var ( + errorCodeToDescriptors = map[ErrorCode]ErrorDescriptor{} + idToDescriptors = map[string]ErrorDescriptor{} + groupToDescriptors = map[string][]ErrorDescriptor{} +) + +var ( + // ErrorCodeUnknown is a generic error that can be used as a last + // resort if there is no situation-specific error message that can be used + ErrorCodeUnknown = Register("errcode", ErrorDescriptor{ + Value: "UNKNOWN", + Message: "unknown error", + Description: `Generic error returned when the error does not have an + API classification.`, + HTTPStatusCode: http.StatusInternalServerError, + }) + + // ErrorCodeUnsupported is returned when an operation is not supported. + ErrorCodeUnsupported = Register("errcode", ErrorDescriptor{ + Value: "UNSUPPORTED", + Message: "The operation is unsupported.", + Description: `The operation was unsupported due to a missing + implementation or invalid set of parameters.`, + HTTPStatusCode: http.StatusMethodNotAllowed, + }) + + // ErrorCodeUnauthorized is returned if a request requires + // authentication. + ErrorCodeUnauthorized = Register("errcode", ErrorDescriptor{ + Value: "UNAUTHORIZED", + Message: "authentication required", + Description: `The access controller was unable to authenticate + the client. Often this will be accompanied by a + Www-Authenticate HTTP response header indicating how to + authenticate.`, + HTTPStatusCode: http.StatusUnauthorized, + }) + + // ErrorCodeDenied is returned if a client does not have sufficient + // permission to perform an action. + ErrorCodeDenied = Register("errcode", ErrorDescriptor{ + Value: "DENIED", + Message: "requested access to the resource is denied", + Description: `The access controller denied access for the + operation on a resource.`, + HTTPStatusCode: http.StatusForbidden, + }) + + // ErrorCodeUnavailable provides a common error to report unavailability + // of a service or endpoint. + ErrorCodeUnavailable = Register("errcode", ErrorDescriptor{ + Value: "UNAVAILABLE", + Message: "service unavailable", + Description: "Returned when a service is not available", + HTTPStatusCode: http.StatusServiceUnavailable, + }) + + // ErrorCodeTooManyRequests is returned if a client attempts too many + // times to contact a service endpoint. 
+ ErrorCodeTooManyRequests = Register("errcode", ErrorDescriptor{ + Value: "TOOMANYREQUESTS", + Message: "too many requests", + Description: `Returned when a client attempts to contact a + service too many times`, + HTTPStatusCode: http.StatusTooManyRequests, + }) +) + +var nextCode = 1000 +var registerLock sync.Mutex + +// Register will make the passed-in error known to the environment and +// return a new ErrorCode +func Register(group string, descriptor ErrorDescriptor) ErrorCode { + registerLock.Lock() + defer registerLock.Unlock() + + descriptor.Code = ErrorCode(nextCode) + + if _, ok := idToDescriptors[descriptor.Value]; ok { + panic(fmt.Sprintf("ErrorValue %q is already registered", descriptor.Value)) + } + if _, ok := errorCodeToDescriptors[descriptor.Code]; ok { + panic(fmt.Sprintf("ErrorCode %v is already registered", descriptor.Code)) + } + + groupToDescriptors[group] = append(groupToDescriptors[group], descriptor) + errorCodeToDescriptors[descriptor.Code] = descriptor + idToDescriptors[descriptor.Value] = descriptor + + nextCode++ + return descriptor.Code +} + +type byValue []ErrorDescriptor + +func (a byValue) Len() int { return len(a) } +func (a byValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byValue) Less(i, j int) bool { return a[i].Value < a[j].Value } + +// GetGroupNames returns the list of Error group names that are registered +func GetGroupNames() []string { + keys := []string{} + + for k := range groupToDescriptors { + keys = append(keys, k) + } + sort.Strings(keys) + return keys +} + +// GetErrorCodeGroup returns the named group of error descriptors +func GetErrorCodeGroup(name string) []ErrorDescriptor { + desc := groupToDescriptors[name] + sort.Sort(byValue(desc)) + return desc +} + +// GetErrorAllDescriptors returns a slice of all ErrorDescriptors that are +// registered, irrespective of what group they're in +func GetErrorAllDescriptors() []ErrorDescriptor { + result := []ErrorDescriptor{} + + for _, group := range GetGroupNames() { + result = append(result, GetErrorCodeGroup(group)...) + } + sort.Sort(byValue(result)) + return result +} diff --git a/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go b/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go new file mode 100644 index 0000000..a9616c5 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go @@ -0,0 +1,1596 @@ +package v2 + +import ( + "net/http" + "regexp" + + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/api/errcode" + "github.com/opencontainers/go-digest" +) + +var ( + nameParameterDescriptor = ParameterDescriptor{ + Name: "name", + Type: "string", + Format: reference.NameRegexp.String(), + Required: true, + Description: `Name of the target repository.`, + } + + referenceParameterDescriptor = ParameterDescriptor{ + Name: "reference", + Type: "string", + Format: reference.TagRegexp.String(), + Required: true, + Description: `Tag or digest of the target manifest.`, + } + + uuidParameterDescriptor = ParameterDescriptor{ + Name: "uuid", + Type: "opaque", + Required: true, + Description: "A uuid identifying the upload. This field can accept characters that match `[a-zA-Z0-9-_.=]+`.", + } + + digestPathParameter = ParameterDescriptor{ + Name: "digest", + Type: "path", + Required: true, + Format: digest.DigestRegexp.String(), + Description: `Digest of desired blob.`, + } + + hostHeader = ParameterDescriptor{ + Name: "Host", + Type: "string", + Description: "Standard HTTP Host Header. 
Should be set to the registry host.",
+		Format:      "",
+		Examples:    []string{"registry-1.docker.io"},
+	}
+
+	authHeader = ParameterDescriptor{
+		Name:        "Authorization",
+		Type:        "string",
+		Description: "An RFC7235 compliant authorization header.",
+		Format:      " ",
+		Examples:    []string{"Bearer dGhpcyBpcyBhIGZha2UgYmVhcmVyIHRva2VuIQ=="},
+	}
+
+	authChallengeHeader = ParameterDescriptor{
+		Name:        "WWW-Authenticate",
+		Type:        "string",
+		Description: "An RFC7235 compliant authentication challenge header.",
+		Format:      ` realm="", ..."`,
+		Examples: []string{
+			`Bearer realm="https://auth.docker.com/", service="registry.docker.com", scopes="repository:library/ubuntu:pull"`,
+		},
+	}
+
+	contentLengthZeroHeader = ParameterDescriptor{
+		Name:        "Content-Length",
+		Description: "The `Content-Length` header must be zero and the body must be empty.",
+		Type:        "integer",
+		Format:      "0",
+	}
+
+	dockerUploadUUIDHeader = ParameterDescriptor{
+		Name:        "Docker-Upload-UUID",
+		Description: "Identifies the docker upload uuid for the current request.",
+		Type:        "uuid",
+		Format:      "",
+	}
+
+	digestHeader = ParameterDescriptor{
+		Name:        "Docker-Content-Digest",
+		Description: "Digest of the targeted content for the request.",
+		Type:        "digest",
+		Format:      "",
+	}
+
+	linkHeader = ParameterDescriptor{
+		Name:        "Link",
+		Type:        "link",
+		Description: "RFC5988 compliant rel='next' with URL to next result set, if available",
+		Format:      `<?n=&last=>; rel="next"`,
+	}
+
+	paginationParameters = []ParameterDescriptor{
+		{
+			Name:        "n",
+			Type:        "integer",
+			Description: "Limit the number of entries in each response. If not present, all entries will be returned.",
+			Format:      "",
+			Required:    false,
+		},
+		{
+			Name:        "last",
+			Type:        "string",
+			Description: "Result set will include values lexically after last.",
+			Format:      "",
+			Required:    false,
+		},
+	}
+
+	unauthorizedResponseDescriptor = ResponseDescriptor{
+		Name:        "Authentication Required",
+		StatusCode:  http.StatusUnauthorized,
+		Description: "The client is not authenticated.",
+		Headers: []ParameterDescriptor{
+			authChallengeHeader,
+			{
+				Name:        "Content-Length",
+				Type:        "integer",
+				Description: "Length of the JSON response body.",
+				Format:      "",
+			},
+		},
+		Body: BodyDescriptor{
+			ContentType: "application/json; charset=utf-8",
+			Format:      errorsBody,
+		},
+		ErrorCodes: []errcode.ErrorCode{
+			errcode.ErrorCodeUnauthorized,
+		},
+	}
+
+	repositoryNotFoundResponseDescriptor = ResponseDescriptor{
+		Name:        "No Such Repository Error",
+		StatusCode:  http.StatusNotFound,
+		Description: "The repository is not known to the registry.",
+		Headers: []ParameterDescriptor{
+			{
+				Name:        "Content-Length",
+				Type:        "integer",
+				Description: "Length of the JSON response body.",
+				Format:      "",
+			},
+		},
+		Body: BodyDescriptor{
+			ContentType: "application/json; charset=utf-8",
+			Format:      errorsBody,
+		},
+		ErrorCodes: []errcode.ErrorCode{
+			ErrorCodeNameUnknown,
+		},
+	}
+
+	deniedResponseDescriptor = ResponseDescriptor{
+		Name:        "Access Denied",
+		StatusCode:  http.StatusForbidden,
+		Description: "The client does not have required access to the repository.",
+		Headers: []ParameterDescriptor{
+			{
+				Name:        "Content-Length",
+				Type:        "integer",
+				Description: "Length of the JSON response body.",
+				Format:      "",
+			},
+		},
+		Body: BodyDescriptor{
+			ContentType: "application/json; charset=utf-8",
+			Format:      errorsBody,
+		},
+		ErrorCodes: []errcode.ErrorCode{
+			errcode.ErrorCodeDenied,
+		},
+	}
+
+	tooManyRequestsDescriptor = ResponseDescriptor{
+		Name:        "Too Many Requests",
+		StatusCode:  http.StatusTooManyRequests,
+		Description: "The client made too many requests within a time interval.",
+		Headers: []ParameterDescriptor{
+			{
+				Name:        "Content-Length",
+				Type:        "integer",
+				Description: "Length of the JSON response body.",
+				Format:      "",
+			},
+		},
+		Body: BodyDescriptor{
+			ContentType: "application/json; charset=utf-8",
+			Format:      errorsBody,
+		},
+		ErrorCodes: []errcode.ErrorCode{
+			errcode.ErrorCodeTooManyRequests,
+		},
+	}
+)
+
+const (
+	manifestBody = `{
+   "name": <name>,
+   "tag": <tag>,
+   "fsLayers": [
+      {
+         "blobSum": "<digest>"
+      },
+      ...
+   ],
+   "history": <v1 images>,
+   "signature": <JWS>
+}`
+
+	errorsBody = `{
+	"errors": [
+	    {
+            "code": <error code>,
+            "message": "<error message>",
+            "detail": ...
+        },
+        ...
+    ]
+}`
+)
+
+// APIDescriptor exports descriptions of the layout of the v2 registry API.
+var APIDescriptor = struct {
+	// RouteDescriptors provides a list of the routes available in the API.
+	RouteDescriptors []RouteDescriptor
+}{
+	RouteDescriptors: routeDescriptors,
+}
+
+// RouteDescriptor describes a route specified by name.
+type RouteDescriptor struct {
+	// Name is the name of the route, as specified in RouteNameXXX exports.
+	// These names should be considered a unique reference for a route. If
+	// the route is registered with gorilla, this is the name that will be
+	// used.
+	Name string
+
+	// Path is a gorilla/mux-compatible regexp that can be used to match the
+	// route. For any incoming method and path, only one route descriptor
+	// should match.
+	Path string
+
+	// Entity should be a short, human-readable description of the object
+	// targeted by the endpoint.
+	Entity string
+
+	// Description should provide an accurate overview of the functionality
+	// provided by the route.
+	Description string
+
+	// Methods should describe the various HTTP methods that may be used on
+	// this route, including request and response formats.
+	Methods []MethodDescriptor
+}
+
+// MethodDescriptor provides a description of the requests that may be
+// conducted with the target method.
+type MethodDescriptor struct {
+
+	// Method is an HTTP method, such as GET, PUT or POST.
+	Method string
+
+	// Description should provide an overview of the functionality provided by
+	// the covered method, suitable for use in documentation. Use of markdown
+	// here is encouraged.
+	Description string
+
+	// Requests is a slice of request descriptors enumerating how this
+	// endpoint may be used.
+	Requests []RequestDescriptor
+}
+
+// RequestDescriptor covers a particular set of headers and parameters that
+// can be carried out with the parent method. It's most helpful to have one
+// RequestDescriptor per API use case.
+type RequestDescriptor struct {
+	// Name provides a short identifier for the request, usable as a title or
+	// to provide quick context for the particular request.
+	Name string
+
+	// Description should cover the request's purpose, covering any details for
+	// this particular use case.
+	Description string
+
+	// Headers describes headers that must be used with the HTTP request.
+	Headers []ParameterDescriptor
+
+	// PathParameters enumerate the parameterized path components for the
+	// given request, as defined in the route's regular expression.
+	PathParameters []ParameterDescriptor
+
+	// QueryParameters provides a list of query parameters for the given
+	// request.
+	QueryParameters []ParameterDescriptor
+
+	// Body describes the format of the request body.
+	Body BodyDescriptor
+
+	// Successes enumerates the possible responses that are considered to be
+	// the result of a successful request.
+ Successes []ResponseDescriptor + + // Failures covers the possible failures from this particular request. + Failures []ResponseDescriptor +} + +// ResponseDescriptor describes the components of an API response. +type ResponseDescriptor struct { + // Name provides a short identifier for the response, usable as a title or + // to provide quick context for the particular response. + Name string + + // Description should provide a brief overview of the role of the + // response. + Description string + + // StatusCode specifies the status received by this particular response. + StatusCode int + + // Headers covers any headers that may be returned from the response. + Headers []ParameterDescriptor + + // Fields describes any fields that may be present in the response. + Fields []ParameterDescriptor + + // ErrorCodes enumerates the error codes that may be returned along with + // the response. + ErrorCodes []errcode.ErrorCode + + // Body describes the body of the response, if any. + Body BodyDescriptor +} + +// BodyDescriptor describes a request body and its expected content type. For +// the most part, it should be example json or some placeholder for body +// data in documentation. +type BodyDescriptor struct { + ContentType string + Format string +} + +// ParameterDescriptor describes the format of a request parameter, which may +// be a header, path parameter or query parameter. +type ParameterDescriptor struct { + // Name is the name of the parameter, either of the path component or + // query parameter. + Name string + + // Type specifies the type of the parameter, such as string, integer, etc. + Type string + + // Description provides a human-readable description of the parameter. + Description string + + // Required means the field is required when set. + Required bool + + // Format is a specifying the string format accepted by this parameter. + Format string + + // Regexp is a compiled regular expression that can be used to validate + // the contents of the parameter. + Regexp *regexp.Regexp + + // Examples provides multiple examples for the values that might be valid + // for this parameter. + Examples []string +} + +var routeDescriptors = []RouteDescriptor{ + { + Name: RouteNameBase, + Path: "/v2/", + Entity: "Base", + Description: `Base V2 API route. 
Typically, this can be used for lightweight version checks and to validate registry authentication.`, + Methods: []MethodDescriptor{ + { + Method: "GET", + Description: "Check that the endpoint implements Docker Registry API V2.", + Requests: []RequestDescriptor{ + { + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + Successes: []ResponseDescriptor{ + { + Description: "The API implements V2 protocol and is accessible.", + StatusCode: http.StatusOK, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "The registry does not implement the V2 API.", + StatusCode: http.StatusNotFound, + }, + unauthorizedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + }, + }, + }, + }, + { + Name: RouteNameTags, + Path: "/v2/{name:" + reference.NameRegexp.String() + "}/tags/list", + Entity: "Tags", + Description: "Retrieve information about tags.", + Methods: []MethodDescriptor{ + { + Method: "GET", + Description: "Fetch the tags under the repository identified by `name`.", + Requests: []RequestDescriptor{ + { + Name: "Tags", + Description: "Return all tags for the repository", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + }, + Successes: []ResponseDescriptor{ + { + StatusCode: http.StatusOK, + Description: "A list of tags for the named repository.", + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON response body.", + Format: "", + }, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: `{ + "name": , + "tags": [ + , + ... + ] +}`, + }, + }, + }, + Failures: []ResponseDescriptor{ + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + { + Name: "Tags Paginated", + Description: "Return a portion of the tags for the specified repository.", + PathParameters: []ParameterDescriptor{nameParameterDescriptor}, + QueryParameters: paginationParameters, + Successes: []ResponseDescriptor{ + { + StatusCode: http.StatusOK, + Description: "A list of tags for the named repository.", + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON response body.", + Format: "", + }, + linkHeader, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: `{ + "name": , + "tags": [ + , + ... + ], +}`, + }, + }, + }, + Failures: []ResponseDescriptor{ + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + }, + }, + }, + }, + { + Name: RouteNameManifest, + Path: "/v2/{name:" + reference.NameRegexp.String() + "}/manifests/{reference:" + reference.TagRegexp.String() + "|" + digest.DigestRegexp.String() + "}", + Entity: "Manifest", + Description: "Create, update, delete and retrieve manifests.", + Methods: []MethodDescriptor{ + { + Method: "GET", + Description: "Fetch the manifest identified by `name` and `reference` where `reference` can be a tag or digest. 
A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data.", + Requests: []RequestDescriptor{ + { + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + referenceParameterDescriptor, + }, + Successes: []ResponseDescriptor{ + { + Description: "The manifest identified by `name` and `reference`. The contents can be used to identify and resolve resources required to run the specified image.", + StatusCode: http.StatusOK, + Headers: []ParameterDescriptor{ + digestHeader, + }, + Body: BodyDescriptor{ + ContentType: "", + Format: manifestBody, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "The name or reference was invalid.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameInvalid, + ErrorCodeTagInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + }, + }, + { + Method: "PUT", + Description: "Put the manifest identified by `name` and `reference` where `reference` can be a tag or digest.", + Requests: []RequestDescriptor{ + { + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + referenceParameterDescriptor, + }, + Body: BodyDescriptor{ + ContentType: "", + Format: manifestBody, + }, + Successes: []ResponseDescriptor{ + { + Description: "The manifest has been accepted by the registry and is stored under the specified `name` and `tag`.", + StatusCode: http.StatusCreated, + Headers: []ParameterDescriptor{ + { + Name: "Location", + Type: "url", + Description: "The canonical location url of the uploaded manifest.", + Format: "", + }, + contentLengthZeroHeader, + digestHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Name: "Invalid Manifest", + Description: "The received manifest was invalid in some way, as described by the error codes. The client should resolve the issue and retry the request.", + StatusCode: http.StatusBadRequest, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameInvalid, + ErrorCodeTagInvalid, + ErrorCodeManifestInvalid, + ErrorCodeManifestUnverified, + ErrorCodeBlobUnknown, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + { + Name: "Missing Layer(s)", + Description: "One or more layers may be missing during a manifest upload. If so, the missing layers will be enumerated in the error response.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeBlobUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: `{ + "errors:" [{ + "code": "BLOB_UNKNOWN", + "message": "blob unknown to registry", + "detail": { + "digest": "" + } + }, + ... 
+ ] +}`, + }, + }, + { + Name: "Not allowed", + Description: "Manifest put is not allowed because the registry is configured as a pull-through cache or for some other reason", + StatusCode: http.StatusMethodNotAllowed, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeUnsupported, + }, + }, + }, + }, + }, + }, + { + Method: "DELETE", + Description: "Delete the manifest identified by `name` and `reference`. Note that a manifest can _only_ be deleted by `digest`.", + Requests: []RequestDescriptor{ + { + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + referenceParameterDescriptor, + }, + Successes: []ResponseDescriptor{ + { + StatusCode: http.StatusAccepted, + }, + }, + Failures: []ResponseDescriptor{ + { + Name: "Invalid Name or Reference", + Description: "The specified `name` or `reference` were invalid and the delete was unable to proceed.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameInvalid, + ErrorCodeTagInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + { + Name: "Unknown Manifest", + Description: "The specified `name` or `reference` are unknown to the registry and the delete was unable to proceed. Clients can assume the manifest was already deleted if this response is returned.", + StatusCode: http.StatusNotFound, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameUnknown, + ErrorCodeManifestUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + Name: "Not allowed", + Description: "Manifest delete is not allowed because the registry is configured as a pull-through cache or `delete` has been disabled.", + StatusCode: http.StatusMethodNotAllowed, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeUnsupported, + }, + }, + }, + }, + }, + }, + }, + }, + + { + Name: RouteNameBlob, + Path: "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/{digest:" + digest.DigestRegexp.String() + "}", + Entity: "Blob", + Description: "Operations on blobs identified by `name` and `digest`. Used to fetch or delete layers by digest.", + Methods: []MethodDescriptor{ + { + Method: "GET", + Description: "Retrieve the blob from the registry identified by `digest`. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data.", + Requests: []RequestDescriptor{ + { + Name: "Fetch Blob", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + digestPathParameter, + }, + Successes: []ResponseDescriptor{ + { + Description: "The blob identified by `digest` is available. 
The blob content will be present in the body of the request.", + StatusCode: http.StatusOK, + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "The length of the requested blob content.", + Format: "", + }, + digestHeader, + }, + Body: BodyDescriptor{ + ContentType: "application/octet-stream", + Format: "", + }, + }, + { + Description: "The blob identified by `digest` is available at the provided location.", + StatusCode: http.StatusTemporaryRedirect, + Headers: []ParameterDescriptor{ + { + Name: "Location", + Type: "url", + Description: "The location where the layer should be accessible.", + Format: "", + }, + digestHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameInvalid, + ErrorCodeDigestInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + Description: "The blob, identified by `name` and `digest`, is unknown to the registry.", + StatusCode: http.StatusNotFound, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameUnknown, + ErrorCodeBlobUnknown, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + { + Name: "Fetch Blob Part", + Description: "This endpoint may also support RFC7233 compliant range requests. Support can be detected by issuing a HEAD request. If the header `Accept-Range: bytes` is returned, range requests can be used to fetch partial content.", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + { + Name: "Range", + Type: "string", + Description: "HTTP Range header specifying blob chunk.", + Format: "bytes=-", + }, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + digestPathParameter, + }, + Successes: []ResponseDescriptor{ + { + Description: "The blob identified by `digest` is available. The specified chunk of blob content will be present in the body of the request.", + StatusCode: http.StatusPartialContent, + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "The length of the requested blob chunk.", + Format: "", + }, + { + Name: "Content-Range", + Type: "byte range", + Description: "Content range of blob chunk.", + Format: "bytes -/", + }, + }, + Body: BodyDescriptor{ + ContentType: "application/octet-stream", + Format: "", + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameInvalid, + ErrorCodeDigestInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + StatusCode: http.StatusNotFound, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameUnknown, + ErrorCodeBlobUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + Description: "The range specification cannot be satisfied for the requested content. 
This can happen when the range is not formatted correctly or if the range is outside of the valid size of the content.", + StatusCode: http.StatusRequestedRangeNotSatisfiable, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + }, + }, + { + Method: "DELETE", + Description: "Delete the blob identified by `name` and `digest`", + Requests: []RequestDescriptor{ + { + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + digestPathParameter, + }, + Successes: []ResponseDescriptor{ + { + StatusCode: http.StatusAccepted, + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "0", + Format: "0", + }, + digestHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Name: "Invalid Name or Digest", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + }, + }, + { + Description: "The blob, identified by `name` and `digest`, is unknown to the registry.", + StatusCode: http.StatusNotFound, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameUnknown, + ErrorCodeBlobUnknown, + }, + }, + { + Description: "Blob delete is not allowed because the registry is configured as a pull-through cache or `delete` has been disabled", + StatusCode: http.StatusMethodNotAllowed, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeUnsupported, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + }, + }, + + // TODO(stevvooe): We may want to add a PUT request here to + // kickoff an upload of a blob, integrated with the blob upload + // API. + }, + }, + + { + Name: RouteNameBlobUpload, + Path: "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/uploads/", + Entity: "Initiate Blob Upload", + Description: "Initiate a blob upload. This endpoint can be used to create resumable uploads or monolithic uploads.", + Methods: []MethodDescriptor{ + { + Method: "POST", + Description: "Initiate a resumable blob upload. If successful, an upload location will be provided to complete the upload. Optionally, if the `digest` parameter is present, the request body will be used to complete the upload in a single request.", + Requests: []RequestDescriptor{ + { + Name: "Initiate Monolithic Blob Upload", + Description: "Upload a blob identified by the `digest` parameter in single request. This upload will not be resumable unless a recoverable error is returned.", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + { + Name: "Content-Length", + Type: "integer", + Format: "", + }, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + }, + QueryParameters: []ParameterDescriptor{ + { + Name: "digest", + Type: "query", + Format: "", + Regexp: digest.DigestRegexp, + Description: `Digest of uploaded blob. 
If present, the upload will be completed, in a single request, with contents of the request body as the resulting blob.`, + }, + }, + Body: BodyDescriptor{ + ContentType: "application/octect-stream", + Format: "", + }, + Successes: []ResponseDescriptor{ + { + Description: "The blob has been created in the registry and is available at the provided location.", + StatusCode: http.StatusCreated, + Headers: []ParameterDescriptor{ + { + Name: "Location", + Type: "url", + Format: "", + }, + contentLengthZeroHeader, + dockerUploadUUIDHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Name: "Invalid Name or Digest", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + }, + }, + { + Name: "Not allowed", + Description: "Blob upload is not allowed because the registry is configured as a pull-through cache or for some other reason", + StatusCode: http.StatusMethodNotAllowed, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeUnsupported, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + { + Name: "Initiate Resumable Blob Upload", + Description: "Initiate a resumable blob upload with an empty request body.", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + contentLengthZeroHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + }, + Successes: []ResponseDescriptor{ + { + Description: "The upload has been created. The `Location` header must be used to complete the upload. The response should be identical to a `GET` request on the contents of the returned `Location` header.", + StatusCode: http.StatusAccepted, + Headers: []ParameterDescriptor{ + contentLengthZeroHeader, + { + Name: "Location", + Type: "url", + Format: "/v2//blobs/uploads/", + Description: "The location of the created upload. Clients should use the contents verbatim to complete the upload, adding parameters where required.", + }, + { + Name: "Range", + Format: "0-0", + Description: "Range header indicating the progress of the upload. 
When starting an upload, it will return an empty range, since no content has been received.",
+								},
+								dockerUploadUUIDHeader,
+							},
+						},
+					},
+					Failures: []ResponseDescriptor{
+						{
+							Name:       "Invalid Name or Digest",
+							StatusCode: http.StatusBadRequest,
+							ErrorCodes: []errcode.ErrorCode{
+								ErrorCodeDigestInvalid,
+								ErrorCodeNameInvalid,
+							},
+						},
+						unauthorizedResponseDescriptor,
+						repositoryNotFoundResponseDescriptor,
+						deniedResponseDescriptor,
+						tooManyRequestsDescriptor,
+					},
+				},
+				{
+					Name:        "Mount Blob",
+					Description: "Mount a blob identified by the `mount` parameter from another repository.",
+					Headers: []ParameterDescriptor{
+						hostHeader,
+						authHeader,
+						contentLengthZeroHeader,
+					},
+					PathParameters: []ParameterDescriptor{
+						nameParameterDescriptor,
+					},
+					QueryParameters: []ParameterDescriptor{
+						{
+							Name:        "mount",
+							Type:        "query",
+							Format:      "<digest>",
+							Regexp:      digest.DigestRegexp,
+							Description: `Digest of blob to mount from the source repository.`,
+						},
+						{
+							Name:        "from",
+							Type:        "query",
+							Format:      "<repository name>",
+							Regexp:      reference.NameRegexp,
+							Description: `Name of the source repository.`,
+						},
+					},
+					Successes: []ResponseDescriptor{
+						{
+							Description: "The blob has been mounted in the repository and is available at the provided location.",
+							StatusCode:  http.StatusCreated,
+							Headers: []ParameterDescriptor{
+								{
+									Name:   "Location",
+									Type:   "url",
+									Format: "<blob location>",
+								},
+								contentLengthZeroHeader,
+								dockerUploadUUIDHeader,
+							},
+						},
+					},
+					Failures: []ResponseDescriptor{
+						{
+							Name:       "Invalid Name or Digest",
+							StatusCode: http.StatusBadRequest,
+							ErrorCodes: []errcode.ErrorCode{
+								ErrorCodeDigestInvalid,
+								ErrorCodeNameInvalid,
+							},
+						},
+						{
+							Name:        "Not allowed",
+							Description: "Blob mount is not allowed because the registry is configured as a pull-through cache or for some other reason",
+							StatusCode:  http.StatusMethodNotAllowed,
+							ErrorCodes: []errcode.ErrorCode{
+								errcode.ErrorCodeUnsupported,
+							},
+						},
+						unauthorizedResponseDescriptor,
+						repositoryNotFoundResponseDescriptor,
+						deniedResponseDescriptor,
+						tooManyRequestsDescriptor,
+					},
+				},
+			},
+		},
+	},
+},
+
+{
+	Name:        RouteNameBlobUploadChunk,
+	Path:        "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/uploads/{uuid:[a-zA-Z0-9-_.=]+}",
+	Entity:      "Blob Upload",
+	Description: "Interact with blob uploads. Clients should never assemble URLs for this endpoint and should only take it through the `Location` header on related API requests. The `Location` header and its parameters should be preserved by clients, using the latest value returned via upload related API calls.",
+	Methods: []MethodDescriptor{
+		{
+			Method:      "GET",
+			Description: "Retrieve status of upload identified by `uuid`. The primary purpose of this endpoint is to resolve the current status of a resumable upload.",
+			Requests: []RequestDescriptor{
+				{
+					Description: "Retrieve the progress of the current upload, as reported by the `Range` header.",
+					Headers: []ParameterDescriptor{
+						hostHeader,
+						authHeader,
+					},
+					PathParameters: []ParameterDescriptor{
+						nameParameterDescriptor,
+						uuidParameterDescriptor,
+					},
+					Successes: []ResponseDescriptor{
+						{
+							Name:        "Upload Progress",
+							Description: "The upload is known and in progress.
The last received offset is available in the `Range` header.",
+							StatusCode:  http.StatusNoContent,
+							Headers: []ParameterDescriptor{
+								{
+									Name:        "Range",
+									Type:        "header",
+									Format:      "0-<offset>",
+									Description: "Range indicating the current progress of the upload.",
+								},
+								contentLengthZeroHeader,
+								dockerUploadUUIDHeader,
+							},
+						},
+					},
+					Failures: []ResponseDescriptor{
+						{
+							Description: "There was an error processing the upload and it must be restarted.",
+							StatusCode:  http.StatusBadRequest,
+							ErrorCodes: []errcode.ErrorCode{
+								ErrorCodeDigestInvalid,
+								ErrorCodeNameInvalid,
+								ErrorCodeBlobUploadInvalid,
+							},
+							Body: BodyDescriptor{
+								ContentType: "application/json; charset=utf-8",
+								Format:      errorsBody,
+							},
+						},
+						{
+							Description: "The upload is unknown to the registry. The upload must be restarted.",
+							StatusCode:  http.StatusNotFound,
+							ErrorCodes: []errcode.ErrorCode{
+								ErrorCodeBlobUploadUnknown,
+							},
+							Body: BodyDescriptor{
+								ContentType: "application/json; charset=utf-8",
+								Format:      errorsBody,
+							},
+						},
+						unauthorizedResponseDescriptor,
+						repositoryNotFoundResponseDescriptor,
+						deniedResponseDescriptor,
+						tooManyRequestsDescriptor,
+					},
+				},
+			},
+		},
+		{
+			Method:      "PATCH",
+			Description: "Upload a chunk of data for the specified upload.",
+			Requests: []RequestDescriptor{
+				{
+					Name:        "Stream upload",
+					Description: "Upload a stream of data to the upload without completing it.",
+					PathParameters: []ParameterDescriptor{
+						nameParameterDescriptor,
+						uuidParameterDescriptor,
+					},
+					Headers: []ParameterDescriptor{
+						hostHeader,
+						authHeader,
+					},
+					Body: BodyDescriptor{
+						ContentType: "application/octet-stream",
+						Format:      "<binary data>",
+					},
+					Successes: []ResponseDescriptor{
+						{
+							Name:        "Data Accepted",
+							Description: "The stream of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header.",
+							StatusCode:  http.StatusNoContent,
+							Headers: []ParameterDescriptor{
+								{
+									Name:        "Location",
+									Type:        "url",
+									Format:      "/v2/<name>/blobs/uploads/<uuid>",
+									Description: "The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.",
+								},
+								{
+									Name:        "Range",
+									Type:        "header",
+									Format:      "0-<offset>",
+									Description: "Range indicating the current progress of the upload.",
+								},
+								contentLengthZeroHeader,
+								dockerUploadUUIDHeader,
+							},
+						},
+					},
+					Failures: []ResponseDescriptor{
+						{
+							Description: "There was an error processing the upload and it must be restarted.",
+							StatusCode:  http.StatusBadRequest,
+							ErrorCodes: []errcode.ErrorCode{
+								ErrorCodeDigestInvalid,
+								ErrorCodeNameInvalid,
+								ErrorCodeBlobUploadInvalid,
+							},
+							Body: BodyDescriptor{
+								ContentType: "application/json; charset=utf-8",
+								Format:      errorsBody,
+							},
+						},
+						{
+							Description: "The upload is unknown to the registry. The upload must be restarted.",
+							StatusCode:  http.StatusNotFound,
+							ErrorCodes: []errcode.ErrorCode{
+								ErrorCodeBlobUploadUnknown,
+							},
+							Body: BodyDescriptor{
+								ContentType: "application/json; charset=utf-8",
+								Format:      errorsBody,
+							},
+						},
+						unauthorizedResponseDescriptor,
+						repositoryNotFoundResponseDescriptor,
+						deniedResponseDescriptor,
+						tooManyRequestsDescriptor,
+					},
+				},
+				{
+					Name:        "Chunked upload",
+					Description: "Upload a chunk of data to the specified upload without completing the upload.
The data will be uploaded to the specified Content Range.",
+					PathParameters: []ParameterDescriptor{
+						nameParameterDescriptor,
+						uuidParameterDescriptor,
+					},
+					Headers: []ParameterDescriptor{
+						hostHeader,
+						authHeader,
+						{
+							Name:        "Content-Range",
+							Type:        "header",
+							Format:      "<start of range>-<end of range, inclusive>",
+							Required:    true,
+							Description: "Range of bytes identifying the desired block of content represented by the body. Start must be the end offset retrieved via status check plus one. Note that this is a non-standard use of the `Content-Range` header.",
+						},
+						{
+							Name:        "Content-Length",
+							Type:        "integer",
+							Format:      "<length of chunk>",
+							Description: "Length of the chunk being uploaded, corresponding to the length of the request body.",
+						},
+					},
+					Body: BodyDescriptor{
+						ContentType: "application/octet-stream",
+						Format:      "<binary data>",
+					},
+					Successes: []ResponseDescriptor{
+						{
+							Name:        "Chunk Accepted",
+							Description: "The chunk of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header.",
+							StatusCode:  http.StatusNoContent,
+							Headers: []ParameterDescriptor{
+								{
+									Name:        "Location",
+									Type:        "url",
+									Format:      "/v2/<name>/blobs/uploads/<uuid>",
+									Description: "The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.",
+								},
+								{
+									Name:        "Range",
+									Type:        "header",
+									Format:      "0-<offset>",
+									Description: "Range indicating the current progress of the upload.",
+								},
+								contentLengthZeroHeader,
+								dockerUploadUUIDHeader,
+							},
+						},
+					},
+					Failures: []ResponseDescriptor{
+						{
+							Description: "There was an error processing the upload and it must be restarted.",
+							StatusCode:  http.StatusBadRequest,
+							ErrorCodes: []errcode.ErrorCode{
+								ErrorCodeDigestInvalid,
+								ErrorCodeNameInvalid,
+								ErrorCodeBlobUploadInvalid,
+							},
+							Body: BodyDescriptor{
+								ContentType: "application/json; charset=utf-8",
+								Format:      errorsBody,
+							},
+						},
+						{
+							Description: "The upload is unknown to the registry. The upload must be restarted.",
+							StatusCode:  http.StatusNotFound,
+							ErrorCodes: []errcode.ErrorCode{
+								ErrorCodeBlobUploadUnknown,
+							},
+							Body: BodyDescriptor{
+								ContentType: "application/json; charset=utf-8",
+								Format:      errorsBody,
+							},
+						},
+						{
+							Description: "The `Content-Range` specification cannot be accepted, either because it does not overlap with the current progress or it is invalid.",
+							StatusCode:  http.StatusRequestedRangeNotSatisfiable,
+						},
+						unauthorizedResponseDescriptor,
+						repositoryNotFoundResponseDescriptor,
+						deniedResponseDescriptor,
+						tooManyRequestsDescriptor,
+					},
+				},
+			},
+		},
+		{
+			Method:      "PUT",
+			Description: "Complete the upload specified by `uuid`, optionally appending the body as the final chunk.",
+			Requests: []RequestDescriptor{
+				{
+					Description: "Complete the upload, providing all the data in the body, if necessary. A request without a body will just complete the upload with previously uploaded content.",
+					Headers: []ParameterDescriptor{
+						hostHeader,
+						authHeader,
+						{
+							Name:        "Content-Length",
+							Type:        "integer",
+							Format:      "<length of data>",
+							Description: "Length of the data being uploaded, corresponding to the length of the request body.
May be zero if no data is provided.",
+						},
+					},
+					PathParameters: []ParameterDescriptor{
+						nameParameterDescriptor,
+						uuidParameterDescriptor,
+					},
+					QueryParameters: []ParameterDescriptor{
+						{
+							Name:        "digest",
+							Type:        "string",
+							Format:      "<digest>",
+							Regexp:      digest.DigestRegexp,
+							Required:    true,
+							Description: `Digest of uploaded blob.`,
+						},
+					},
+					Body: BodyDescriptor{
+						ContentType: "application/octet-stream",
+						Format:      "<binary data>",
+					},
+					Successes: []ResponseDescriptor{
+						{
+							Name:        "Upload Complete",
+							Description: "The upload has been completed and accepted by the registry. The canonical location will be available in the `Location` header.",
+							StatusCode:  http.StatusNoContent,
+							Headers: []ParameterDescriptor{
+								{
+									Name:        "Location",
+									Type:        "url",
+									Format:      "<blob location>",
+									Description: "The canonical location of the blob for retrieval",
+								},
+								{
+									Name:        "Content-Range",
+									Type:        "header",
+									Format:      "<start of range>-<end of range, inclusive>",
+									Description: "Range of bytes identifying the desired block of content represented by the body. Start must match the end offset retrieved via status check. Note that this is a non-standard use of the `Content-Range` header.",
+								},
+								contentLengthZeroHeader,
+								digestHeader,
+							},
+						},
+					},
+					Failures: []ResponseDescriptor{
+						{
+							Description: "There was an error processing the upload and it must be restarted.",
+							StatusCode:  http.StatusBadRequest,
+							ErrorCodes: []errcode.ErrorCode{
+								ErrorCodeDigestInvalid,
+								ErrorCodeNameInvalid,
+								ErrorCodeBlobUploadInvalid,
+								errcode.ErrorCodeUnsupported,
+							},
+							Body: BodyDescriptor{
+								ContentType: "application/json; charset=utf-8",
+								Format:      errorsBody,
+							},
+						},
+						{
+							Description: "The upload is unknown to the registry. The upload must be restarted.",
+							StatusCode:  http.StatusNotFound,
+							ErrorCodes: []errcode.ErrorCode{
+								ErrorCodeBlobUploadUnknown,
+							},
+							Body: BodyDescriptor{
+								ContentType: "application/json; charset=utf-8",
+								Format:      errorsBody,
+							},
+						},
+						unauthorizedResponseDescriptor,
+						repositoryNotFoundResponseDescriptor,
+						deniedResponseDescriptor,
+						tooManyRequestsDescriptor,
+					},
+				},
+			},
+		},
+		{
+			Method:      "DELETE",
+			Description: "Cancel outstanding upload processes, releasing associated resources. If this is not called, the unfinished uploads will eventually time out.",
+			Requests: []RequestDescriptor{
+				{
+					Description: "Cancel the upload specified by `uuid`.",
+					PathParameters: []ParameterDescriptor{
+						nameParameterDescriptor,
+						uuidParameterDescriptor,
+					},
+					Headers: []ParameterDescriptor{
+						hostHeader,
+						authHeader,
+						contentLengthZeroHeader,
+					},
+					Successes: []ResponseDescriptor{
+						{
+							Name:        "Upload Deleted",
+							Description: "The upload has been successfully deleted.",
+							StatusCode:  http.StatusNoContent,
+							Headers: []ParameterDescriptor{
+								contentLengthZeroHeader,
+							},
+						},
+					},
+					Failures: []ResponseDescriptor{
+						{
+							Description: "An error was encountered processing the delete. The client may ignore this error.",
+							StatusCode:  http.StatusBadRequest,
+							ErrorCodes: []errcode.ErrorCode{
+								ErrorCodeNameInvalid,
+								ErrorCodeBlobUploadInvalid,
+							},
+							Body: BodyDescriptor{
+								ContentType: "application/json; charset=utf-8",
+								Format:      errorsBody,
+							},
+						},
+						{
+							Description: "The upload is unknown to the registry.
The client may ignore this error and assume the upload has been deleted.",
+							StatusCode: http.StatusNotFound,
+							ErrorCodes: []errcode.ErrorCode{
+								ErrorCodeBlobUploadUnknown,
+							},
+							Body: BodyDescriptor{
+								ContentType: "application/json; charset=utf-8",
+								Format:      errorsBody,
+							},
+						},
+						unauthorizedResponseDescriptor,
+						repositoryNotFoundResponseDescriptor,
+						deniedResponseDescriptor,
+						tooManyRequestsDescriptor,
+					},
+				},
+			},
+		},
+	},
+},
+{
+	Name:        RouteNameCatalog,
+	Path:        "/v2/_catalog",
+	Entity:      "Catalog",
+	Description: "List a set of available repositories in the local registry cluster. Does not provide any indication of what may be available upstream. Applications can only determine if a repository is available but not if it is not available.",
+	Methods: []MethodDescriptor{
+		{
+			Method:      "GET",
+			Description: "Retrieve a sorted, JSON list of repositories available in the registry.",
+			Requests: []RequestDescriptor{
+				{
+					Name:        "Catalog Fetch",
+					Description: "Request an unabridged list of repositories available.",
+					Successes: []ResponseDescriptor{
+						{
+							Description: "Returns the unabridged list of repositories as a JSON response.",
+							StatusCode:  http.StatusOK,
+							Headers: []ParameterDescriptor{
+								{
+									Name:        "Content-Length",
+									Type:        "integer",
+									Description: "Length of the JSON response body.",
+									Format:      "<length>",
+								},
+							},
+							Body: BodyDescriptor{
+								ContentType: "application/json; charset=utf-8",
+								Format: `{
+	"repositories": [
+		<name>,
+		...
+	]
+}`,
+							},
+						},
+					},
+				},
+				{
+					Name:            "Catalog Fetch Paginated",
+					Description:     "Return the specified portion of repositories.",
+					QueryParameters: paginationParameters,
+					Successes: []ResponseDescriptor{
+						{
+							StatusCode: http.StatusOK,
+							Body: BodyDescriptor{
+								ContentType: "application/json; charset=utf-8",
+								Format: `{
+	"repositories": [
+		<name>,
+		...
+	],
+	"next": "<url>?last=<name>&n=<last value of n>"
+}`,
+							},
+							Headers: []ParameterDescriptor{
+								{
+									Name:        "Content-Length",
+									Type:        "integer",
+									Description: "Length of the JSON response body.",
+									Format:      "<length>",
+								},
+								linkHeader,
+							},
+						},
+					},
+				},
+			},
+		},
+	},
+},
+}
+
+var routeDescriptorsMap map[string]RouteDescriptor
+
+func init() {
+	routeDescriptorsMap = make(map[string]RouteDescriptor, len(routeDescriptors))
+
+	for _, descriptor := range routeDescriptors {
+		routeDescriptorsMap[descriptor.Name] = descriptor
+	}
+}
diff --git a/vendor/github.com/docker/distribution/registry/api/v2/doc.go b/vendor/github.com/docker/distribution/registry/api/v2/doc.go
new file mode 100644
index 0000000..cde0119
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/api/v2/doc.go
@@ -0,0 +1,9 @@
+// Package v2 describes routes, urls and the error codes used in the Docker
+// Registry JSON HTTP API V2. In addition to declarations, descriptors are
+// provided for routes and error codes that can be used for implementation and
+// automatically generating documentation.
+//
+// Definitions here are considered to be locked down for the V2 registry api.
+// Any changes must be considered carefully and should not proceed without a
+// change proposal in docker core.
+package v2
diff --git a/vendor/github.com/docker/distribution/registry/api/v2/errors.go b/vendor/github.com/docker/distribution/registry/api/v2/errors.go
new file mode 100644
index 0000000..97d6923
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/api/v2/errors.go
@@ -0,0 +1,136 @@
+package v2
+
+import (
+	"net/http"
+
+	"github.com/docker/distribution/registry/api/errcode"
+)
+
+const errGroup = "registry.api.v2"
+
+var (
+	// ErrorCodeDigestInvalid is returned when uploading a blob if the
+	// provided digest does not match the blob contents.
+	ErrorCodeDigestInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:   "DIGEST_INVALID",
+		Message: "provided digest did not match uploaded content",
+		Description: `When a blob is uploaded, the registry will check that
+		the content matches the digest provided by the client. The error may
+		include a detail structure with the key "digest", including the
+		invalid digest string. This error may also be returned when a manifest
+		includes an invalid layer digest.`,
+		HTTPStatusCode: http.StatusBadRequest,
+	})
+
+	// ErrorCodeSizeInvalid is returned when uploading a blob if the provided
+	// size does not match the content length.
+	ErrorCodeSizeInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:   "SIZE_INVALID",
+		Message: "provided length did not match content length",
+		Description: `When a layer is uploaded, the provided size will be
+		checked against the uploaded content. If they do not match, this error
+		will be returned.`,
+		HTTPStatusCode: http.StatusBadRequest,
+	})
+
+	// ErrorCodeNameInvalid is returned when the name in the manifest does not
+	// match the provided name.
+	ErrorCodeNameInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:   "NAME_INVALID",
+		Message: "invalid repository name",
+		Description: `Invalid repository name encountered either during
+		manifest validation or any API operation.`,
+		HTTPStatusCode: http.StatusBadRequest,
+	})
+
+	// ErrorCodeTagInvalid is returned when the tag in the manifest does not
+	// match the provided tag.
+	ErrorCodeTagInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:   "TAG_INVALID",
+		Message: "manifest tag did not match URI",
+		Description: `During a manifest upload, if the tag in the manifest
+		does not match the uri tag, this error will be returned.`,
+		HTTPStatusCode: http.StatusBadRequest,
+	})
+
+	// ErrorCodeNameUnknown is returned when the repository name is not known.
+	ErrorCodeNameUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:   "NAME_UNKNOWN",
+		Message: "repository name not known to registry",
+		Description: `This is returned if the name used during an operation is
+		unknown to the registry.`,
+		HTTPStatusCode: http.StatusNotFound,
+	})
+
+	// ErrorCodeManifestUnknown is returned when the image manifest is unknown.
+	ErrorCodeManifestUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:   "MANIFEST_UNKNOWN",
+		Message: "manifest unknown",
+		Description: `This error is returned when the manifest, identified by
+		name and tag, is unknown to the repository.`,
+		HTTPStatusCode: http.StatusNotFound,
+	})
+
+	// ErrorCodeManifestInvalid is returned when an image manifest is invalid,
+	// typically during a PUT operation. This error encompasses all errors
+	// encountered during manifest validation that aren't signature errors.
+	ErrorCodeManifestInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:   "MANIFEST_INVALID",
+		Message: "manifest invalid",
+		Description: `During upload, manifests undergo several checks ensuring
+		validity. If those checks fail, this error may be returned, unless a
+		more specific error is included. The detail will contain information
+		on the failed validation.`,
+		HTTPStatusCode: http.StatusBadRequest,
+	})
+
+	// ErrorCodeManifestUnverified is returned when the manifest fails
+	// signature verification.
+	ErrorCodeManifestUnverified = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:   "MANIFEST_UNVERIFIED",
+		Message: "manifest failed signature verification",
+		Description: `During manifest upload, if the manifest fails signature
+		verification, this error will be returned.`,
+		HTTPStatusCode: http.StatusBadRequest,
+	})
+
+	// ErrorCodeManifestBlobUnknown is returned when a manifest blob is
+	// unknown to the registry.
+	ErrorCodeManifestBlobUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:   "MANIFEST_BLOB_UNKNOWN",
+		Message: "blob unknown to registry",
+		Description: `This error may be returned when a manifest blob is
+		unknown to the registry.`,
+		HTTPStatusCode: http.StatusBadRequest,
+	})
+
+	// ErrorCodeBlobUnknown is returned when a blob is unknown to the
+	// registry. This can happen when the manifest references a nonexistent
+	// layer or the result is not found by a blob fetch.
+	ErrorCodeBlobUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:   "BLOB_UNKNOWN",
+		Message: "blob unknown to registry",
+		Description: `This error may be returned when a blob is unknown to the
+		registry in a specified repository. This can be returned with a
+		standard get or if a manifest references an unknown layer during
+		upload.`,
+		HTTPStatusCode: http.StatusNotFound,
+	})
+
+	// ErrorCodeBlobUploadUnknown is returned when an upload is unknown.
+	ErrorCodeBlobUploadUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:   "BLOB_UPLOAD_UNKNOWN",
+		Message: "blob upload unknown to registry",
+		Description: `If a blob upload has been cancelled or was never
+		started, this error code may be returned.`,
+		HTTPStatusCode: http.StatusNotFound,
+	})
+
+	// ErrorCodeBlobUploadInvalid is returned when an upload is invalid.
+	ErrorCodeBlobUploadInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:   "BLOB_UPLOAD_INVALID",
+		Message: "blob upload invalid",
+		Description: `The blob upload encountered an error and can no
+		longer proceed.`,
+		HTTPStatusCode: http.StatusNotFound,
+	})
+)
diff --git a/vendor/github.com/docker/distribution/registry/api/v2/headerparser.go b/vendor/github.com/docker/distribution/registry/api/v2/headerparser.go
new file mode 100644
index 0000000..9bc41a3
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/api/v2/headerparser.go
@@ -0,0 +1,161 @@
+package v2
+
+import (
+	"fmt"
+	"regexp"
+	"strings"
+	"unicode"
+)
+
+var (
+	// according to rfc7230
+	reToken            = regexp.MustCompile(`^[^"(),/:;<=>?@[\]{}[:space:][:cntrl:]]+`)
+	reQuotedValue      = regexp.MustCompile(`^[^\\"]+`)
+	reEscapedCharacter = regexp.MustCompile(`^[[:blank:][:graph:]]`)
+)
+
+// parseForwardedHeader is a benevolent parser of the Forwarded header defined in
+// rfc7239. The header contains a comma-separated list of forwarding key-value
+// pairs. Each list element is set by a single proxy. The function parses only
+// the first element of the list, which is set by the very first proxy. It returns a map
+// of corresponding key-value pairs and an unparsed slice of the input string.
+//
+// Examples of Forwarded header values:
+//
+//  1. Forwarded: For=192.0.2.43; Proto=https,For="[2001:db8:cafe::17]",For=unknown
+//  2. Forwarded: for="192.0.2.43:443"; host="registry.example.org", for="10.10.05.40:80"
+//
+// The first will be parsed into {"for": "192.0.2.43", "proto": "https"} while the second into
+// {"for": "192.0.2.43:443", "host": "registry.example.org"}.
+func parseForwardedHeader(forwarded string) (map[string]string, string, error) {
+	// The following are the states of the forwarded header parser. Any state
+	// could transition to a failure.
+	const (
+		// terminating state; can transition to Parameter
+		stateElement = iota
+		// terminating state; can transition to KeyValueDelimiter
+		stateParameter
+		// can transition to Value
+		stateKeyValueDelimiter
+		// can transition to one of { QuotedValue, PairEnd }
+		stateValue
+		// can transition to one of { EscapedCharacter, PairEnd }
+		stateQuotedValue
+		// can transition to one of { QuotedValue }
+		stateEscapedCharacter
+		// terminating state; can transition to one of { Parameter, Element }
+		statePairEnd
+	)
+
+	var (
+		parameter string
+		value     string
+		parse     = forwarded[:]
+		res       = map[string]string{}
+		state     = stateElement
+	)
+
+Loop:
+	for {
+		// skip spaces unless in quoted value
+		if state != stateQuotedValue && state != stateEscapedCharacter {
+			parse = strings.TrimLeftFunc(parse, unicode.IsSpace)
+		}
+
+		if len(parse) == 0 {
+			if state != stateElement && state != statePairEnd && state != stateParameter {
+				return nil, parse, fmt.Errorf("unexpected end of input")
+			}
+			// terminating
+			break
+		}
+
+		switch state {
+		// terminate at list element delimiter
+		case stateElement:
+			if parse[0] == ',' {
+				parse = parse[1:]
+				break Loop
+			}
+			state = stateParameter
+
+		// parse parameter (the key of key-value pair)
+		case stateParameter:
+			match := reToken.FindString(parse)
+			if len(match) == 0 {
+				return nil, parse, fmt.Errorf("failed to parse token at position %d", len(forwarded)-len(parse))
+			}
+			parameter = strings.ToLower(match)
+			parse = parse[len(match):]
+			state = stateKeyValueDelimiter
+
+		// parse '='
+		case stateKeyValueDelimiter:
+			if parse[0] != '=' {
+				return nil, parse, fmt.Errorf("expected '=', not '%c' at position %d", parse[0], len(forwarded)-len(parse))
+			}
+			parse = parse[1:]
+			state = stateValue
+
+		// parse value or quoted value
+		case stateValue:
+			if parse[0] == '"' {
+				parse = parse[1:]
+				state = stateQuotedValue
+			} else {
+				value = reToken.FindString(parse)
+				if len(value) == 0 {
+					return nil, parse, fmt.Errorf("failed to parse value at position %d", len(forwarded)-len(parse))
+				}
+				if _, exists := res[parameter]; exists {
+					return nil, parse, fmt.Errorf("duplicate parameter %q at position %d", parameter, len(forwarded)-len(parse))
+				}
+				res[parameter] = value
+				parse = parse[len(value):]
+				value = ""
+				state = statePairEnd
+			}
+
+		// parse a part of quoted value until the first backslash
+		case stateQuotedValue:
+			match := reQuotedValue.FindString(parse)
+			value += match
+			parse = parse[len(match):]
+			switch {
+			case len(parse) == 0:
+				return nil, parse, fmt.Errorf("unterminated quoted string")
+			case parse[0] == '"':
+				res[parameter] = value
+				value = ""
+				parse = parse[1:]
+				state = statePairEnd
+			case parse[0] == '\\':
+				parse = parse[1:]
+				state = stateEscapedCharacter
+			}
+
+		// parse escaped character in a quoted string, ignore the backslash
+		// transition back to QuotedValue state
+		case stateEscapedCharacter:
+			c := reEscapedCharacter.FindString(parse)
+			if len(c) == 0 {
+				return nil, parse, fmt.Errorf("invalid escape sequence at position %d", len(forwarded)-len(parse)-1)
+			}
+			value += c
+			parse = parse[1:]
+			state = stateQuotedValue
+
+		// expect either a new key-value pair, new list or end of input
+		case statePairEnd:
+			switch parse[0] {
+			case ';':
+				parse = parse[1:]
+				state = stateParameter
+			case ',':
+				state = stateElement
+			default:
+				return nil, parse, fmt.Errorf("expected ',' or ';', not %c at position %d", parse[0], len(forwarded)-len(parse))
+			}
+		}
+	}
+
+	return res, parse, nil
+}
diff --git a/vendor/github.com/docker/distribution/registry/api/v2/routes.go b/vendor/github.com/docker/distribution/registry/api/v2/routes.go
new file mode 100644
index 0000000..5b80d5b
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/api/v2/routes.go
@@ -0,0 +1,49 @@
+package v2
+
+import "github.com/gorilla/mux"
+
+// The following are definitions of the names under which all V2 routes are
+// registered. These symbols can be used to look up a route based on the name.
+const (
+	RouteNameBase            = "base"
+	RouteNameManifest        = "manifest"
+	RouteNameTags            = "tags"
+	RouteNameBlob            = "blob"
+	RouteNameBlobUpload      = "blob-upload"
+	RouteNameBlobUploadChunk = "blob-upload-chunk"
+	RouteNameCatalog         = "catalog"
+)
+
+var allEndpoints = []string{
+	RouteNameManifest,
+	RouteNameCatalog,
+	RouteNameTags,
+	RouteNameBlob,
+	RouteNameBlobUpload,
+	RouteNameBlobUploadChunk,
+}
+
+// Router builds a gorilla router with named routes for the various API
+// methods. This can be used directly by both server implementations and
+// clients.
+func Router() *mux.Router {
+	return RouterWithPrefix("")
+}
+
+// RouterWithPrefix builds a gorilla router with a configured prefix
+// on all routes.
+func RouterWithPrefix(prefix string) *mux.Router {
+	rootRouter := mux.NewRouter()
+	router := rootRouter
+	if prefix != "" {
+		router = router.PathPrefix(prefix).Subrouter()
+	}
+
+	router.StrictSlash(true)
+
+	for _, descriptor := range routeDescriptors {
+		router.Path(descriptor.Path).Name(descriptor.Name)
+	}
+
+	return rootRouter
+}
diff --git a/vendor/github.com/docker/distribution/registry/api/v2/urls.go b/vendor/github.com/docker/distribution/registry/api/v2/urls.go
new file mode 100644
index 0000000..1337bdb
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/api/v2/urls.go
@@ -0,0 +1,266 @@
+package v2
+
+import (
+	"fmt"
+	"net/http"
+	"net/url"
+	"strings"
+
+	"github.com/docker/distribution/reference"
+	"github.com/gorilla/mux"
+)
+
+// URLBuilder creates registry API urls from a single base endpoint. It can be
+// used to create urls for use in a registry client or server.
+//
+// All urls will be created from the given base, including the api version.
+// For example, if a root of "/foo/" is provided, urls generated will fall
+// under "/foo/v2/...". Most applications will only provide a scheme, host and
+// port, such as "https://localhost:5000/".
+type URLBuilder struct {
+	root     *url.URL // url root (ie http://localhost/)
+	router   *mux.Router
+	relative bool
+}
+
+// NewURLBuilder creates a URLBuilder with provided root url object.
+func NewURLBuilder(root *url.URL, relative bool) *URLBuilder {
+	return &URLBuilder{
+		root:     root,
+		router:   Router(),
+		relative: relative,
+	}
+}
+
+// NewURLBuilderFromString works identically to NewURLBuilder except it takes
+// a string argument for the root, returning an error if it is not a valid
+// url.
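+//
+// A minimal usage sketch (the registry root below is illustrative, not part
+// of this package):
+//
+//	ub, err := NewURLBuilderFromString("https://registry.example.com", false)
+//	if err != nil {
+//		// handle the invalid root url
+//	}
+//	base, _ := ub.BuildBaseURL() // "https://registry.example.com/v2/"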
+func NewURLBuilderFromString(root string, relative bool) (*URLBuilder, error) {
+	u, err := url.Parse(root)
+	if err != nil {
+		return nil, err
+	}
+
+	return NewURLBuilder(u, relative), nil
+}
+
+// NewURLBuilderFromRequest uses information from an *http.Request to
+// construct the root url.
+func NewURLBuilderFromRequest(r *http.Request, relative bool) *URLBuilder {
+	var (
+		scheme = "http"
+		host   = r.Host
+	)
+
+	if r.TLS != nil {
+		scheme = "https"
+	} else if len(r.URL.Scheme) > 0 {
+		scheme = r.URL.Scheme
+	}
+
+	// Handle forwarded headers
+	// Prefer "Forwarded" header as defined by rfc7239 if given
+	// see https://tools.ietf.org/html/rfc7239
+	if forwarded := r.Header.Get("Forwarded"); len(forwarded) > 0 {
+		forwardedHeader, _, err := parseForwardedHeader(forwarded)
+		if err == nil {
+			if fproto := forwardedHeader["proto"]; len(fproto) > 0 {
+				scheme = fproto
+			}
+			if fhost := forwardedHeader["host"]; len(fhost) > 0 {
+				host = fhost
+			}
+		}
+	} else {
+		if forwardedProto := r.Header.Get("X-Forwarded-Proto"); len(forwardedProto) > 0 {
+			scheme = forwardedProto
+		}
+		if forwardedHost := r.Header.Get("X-Forwarded-Host"); len(forwardedHost) > 0 {
+			// According to the Apache mod_proxy docs, X-Forwarded-Host can be a
+			// comma-separated list of hosts, to which each proxy appends the
+			// requested host. We want to grab the first from this comma-separated
+			// list.
+			hosts := strings.SplitN(forwardedHost, ",", 2)
+			host = strings.TrimSpace(hosts[0])
+		}
+	}
+
+	basePath := routeDescriptorsMap[RouteNameBase].Path
+
+	requestPath := r.URL.Path
+	index := strings.Index(requestPath, basePath)
+
+	u := &url.URL{
+		Scheme: scheme,
+		Host:   host,
+	}
+
+	if index > 0 {
+		// N.B. index+1 is important because we want to include the trailing /
+		u.Path = requestPath[0 : index+1]
+	}
+
+	return NewURLBuilder(u, relative)
+}
+
+// BuildBaseURL constructs a base url for the API, typically just "/v2/".
+func (ub *URLBuilder) BuildBaseURL() (string, error) {
+	route := ub.cloneRoute(RouteNameBase)
+
+	baseURL, err := route.URL()
+	if err != nil {
+		return "", err
+	}
+
+	return baseURL.String(), nil
+}
+
+// BuildCatalogURL constructs a url to get a catalog of repositories
+func (ub *URLBuilder) BuildCatalogURL(values ...url.Values) (string, error) {
+	route := ub.cloneRoute(RouteNameCatalog)
+
+	catalogURL, err := route.URL()
+	if err != nil {
+		return "", err
+	}
+
+	return appendValuesURL(catalogURL, values...).String(), nil
+}
+
+// BuildTagsURL constructs a url to list the tags in the named repository.
+func (ub *URLBuilder) BuildTagsURL(name reference.Named) (string, error) {
+	route := ub.cloneRoute(RouteNameTags)
+
+	tagsURL, err := route.URL("name", name.Name())
+	if err != nil {
+		return "", err
+	}
+
+	return tagsURL.String(), nil
+}
+
+// BuildManifestURL constructs a url for the manifest identified by name and
+// reference. The argument reference may be either a tag or digest.
+func (ub *URLBuilder) BuildManifestURL(ref reference.Named) (string, error) {
+	route := ub.cloneRoute(RouteNameManifest)
+
+	tagOrDigest := ""
+	switch v := ref.(type) {
+	case reference.Tagged:
+		tagOrDigest = v.Tag()
+	case reference.Digested:
+		tagOrDigest = v.Digest().String()
+	default:
+		return "", fmt.Errorf("reference must have a tag or digest")
+	}
+
+	manifestURL, err := route.URL("name", ref.Name(), "reference", tagOrDigest)
+	if err != nil {
+		return "", err
+	}
+
+	return manifestURL.String(), nil
+}
+
+// BuildBlobURL constructs the url for the blob identified by name and dgst.
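+// For example, for a repository named "library/busybox" and a sha256 digest,
+// the generated url has the shape "<root>/v2/library/busybox/blobs/sha256:..."
+// (illustrative).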
+func (ub *URLBuilder) BuildBlobURL(ref reference.Canonical) (string, error) {
+	route := ub.cloneRoute(RouteNameBlob)
+
+	layerURL, err := route.URL("name", ref.Name(), "digest", ref.Digest().String())
+	if err != nil {
+		return "", err
+	}
+
+	return layerURL.String(), nil
+}
+
+// BuildBlobUploadURL constructs a url to begin a blob upload in the
+// repository identified by name.
+func (ub *URLBuilder) BuildBlobUploadURL(name reference.Named, values ...url.Values) (string, error) {
+	route := ub.cloneRoute(RouteNameBlobUpload)
+
+	uploadURL, err := route.URL("name", name.Name())
+	if err != nil {
+		return "", err
+	}
+
+	return appendValuesURL(uploadURL, values...).String(), nil
+}
+
+// BuildBlobUploadChunkURL constructs a url for the upload identified by uuid,
+// including any url values. This should generally not be used by clients, as
+// this url is provided by server implementations during the blob upload
+// process.
+func (ub *URLBuilder) BuildBlobUploadChunkURL(name reference.Named, uuid string, values ...url.Values) (string, error) {
+	route := ub.cloneRoute(RouteNameBlobUploadChunk)
+
+	uploadURL, err := route.URL("name", name.Name(), "uuid", uuid)
+	if err != nil {
+		return "", err
+	}
+
+	return appendValuesURL(uploadURL, values...).String(), nil
+}
+
+// cloneRoute returns a clone of the named route from the router. Routes
+// must be cloned to avoid modifying them during url generation.
+func (ub *URLBuilder) cloneRoute(name string) clonedRoute {
+	route := new(mux.Route)
+	root := new(url.URL)
+
+	*route = *ub.router.GetRoute(name) // clone the route
+	*root = *ub.root
+
+	return clonedRoute{Route: route, root: root, relative: ub.relative}
+}
+
+type clonedRoute struct {
+	*mux.Route
+	root     *url.URL
+	relative bool
+}
+
+func (cr clonedRoute) URL(pairs ...string) (*url.URL, error) {
+	routeURL, err := cr.Route.URL(pairs...)
+	if err != nil {
+		return nil, err
+	}
+
+	if cr.relative {
+		return routeURL, nil
+	}
+
+	if routeURL.Scheme == "" && routeURL.User == nil && routeURL.Host == "" {
+		routeURL.Path = routeURL.Path[1:]
+	}
+
+	url := cr.root.ResolveReference(routeURL)
+	url.Scheme = cr.root.Scheme
+	return url, nil
+}
+
+// appendValuesURL appends the parameters to the url.
+func appendValuesURL(u *url.URL, values ...url.Values) *url.URL {
+	merged := u.Query()
+
+	for _, v := range values {
+		for k, vv := range v {
+			merged[k] = append(merged[k], vv...)
+		}
+	}
+
+	u.RawQuery = merged.Encode()
+	return u
+}
+
+// appendValues appends the parameters to the url. Panics if the string is not
+// a url.
+func appendValues(u string, values ...url.Values) string {
+	up, err := url.Parse(u)
+
+	if err != nil {
+		panic(err) // should never happen
+	}
+
+	return appendValuesURL(up, values...).String()
+}
diff --git a/vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go b/vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go
new file mode 100644
index 0000000..2c3ebe1
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go
@@ -0,0 +1,27 @@
+package challenge
+
+import (
+	"net/url"
+	"strings"
+)
+
+// FROM: https://golang.org/src/net/http/http.go
+// Given a string of the form "host", "host:port", or "[ipv6::address]:port",
+// return true if the string includes a port.
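+// For example, hasPort("example.com:5000") is true, while hasPort("[::1]") is
+// false, since the last ':' there precedes the closing ']'.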
+func hasPort(s string) bool { return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") }
+
+// FROM: http://golang.org/src/net/http/transport.go
+var portMap = map[string]string{
+	"http":  "80",
+	"https": "443",
+}
+
+// canonicalAddr returns url.Host but always with a ":port" suffix
+// FROM: http://golang.org/src/net/http/transport.go
+func canonicalAddr(url *url.URL) string {
+	addr := url.Host
+	if !hasPort(addr) {
+		return addr + ":" + portMap[url.Scheme]
+	}
+	return addr
+}
diff --git a/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go b/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go
new file mode 100644
index 0000000..c9bdfc3
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go
@@ -0,0 +1,237 @@
+package challenge
+
+import (
+	"fmt"
+	"net/http"
+	"net/url"
+	"strings"
+	"sync"
+)
+
+// Challenge carries information from a WWW-Authenticate response header.
+// See RFC 2617.
+type Challenge struct {
+	// Scheme is the auth-scheme according to RFC 2617
+	Scheme string
+
+	// Parameters are the auth-params according to RFC 2617
+	Parameters map[string]string
+}
+
+// Manager manages the challenges for endpoints.
+// The challenges are pulled out of HTTP responses. Only
+// responses which expect challenges should be added to
+// the manager, since a non-unauthorized request will be
+// viewed as not requiring challenges.
+type Manager interface {
+	// GetChallenges returns the challenges for the given
+	// endpoint URL.
+	GetChallenges(endpoint url.URL) ([]Challenge, error)
+
+	// AddResponse adds the response to the challenge
+	// manager. The challenges will be parsed out of
+	// the WWW-Authenticate headers and added to the
+	// URL which produced the response. If the
+	// response was authorized, any challenges for the
+	// endpoint will be cleared.
+	AddResponse(resp *http.Response) error
+}
+
+// NewSimpleManager returns an instance of
+// Manager which only maps endpoints to challenges
+// based on the responses which have been added to the
+// manager. The simple manager will make no attempt to
+// perform requests on the endpoints or cache the responses
+// to a backend.
+func NewSimpleManager() Manager {
+	return &simpleManager{
+		Challenges: make(map[string][]Challenge),
+	}
+}
+
+type simpleManager struct {
+	sync.RWMutex
+	Challenges map[string][]Challenge
+}
+
+func normalizeURL(endpoint *url.URL) {
+	endpoint.Host = strings.ToLower(endpoint.Host)
+	endpoint.Host = canonicalAddr(endpoint)
+}
+
+func (m *simpleManager) GetChallenges(endpoint url.URL) ([]Challenge, error) {
+	normalizeURL(&endpoint)
+
+	m.RLock()
+	defer m.RUnlock()
+	challenges := m.Challenges[endpoint.String()]
+	return challenges, nil
+}
+
+func (m *simpleManager) AddResponse(resp *http.Response) error {
+	challenges := ResponseChallenges(resp)
+	if resp.Request == nil {
+		return fmt.Errorf("missing request reference")
+	}
+	urlCopy := url.URL{
+		Path:   resp.Request.URL.Path,
+		Host:   resp.Request.URL.Host,
+		Scheme: resp.Request.URL.Scheme,
+	}
+	normalizeURL(&urlCopy)
+
+	m.Lock()
+	defer m.Unlock()
+	m.Challenges[urlCopy.String()] = challenges
+	return nil
+}
+
+// Octet types from RFC 2616.
+type octetType byte
+
+var octetTypes [256]octetType
+
+const (
+	isToken octetType = 1 << iota
+	isSpace
+)
+
+func init() {
+	// OCTET      = <any 8-bit sequence of data>
+	// CHAR       = <any US-ASCII character (octets 0 - 127)>
+	// CTL        = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
+	// CR         = <US-ASCII CR, carriage return (13)>
+	// LF         = <US-ASCII LF, linefeed (10)>
+	// SP         = <US-ASCII SP, space (32)>
+	// HT         = <US-ASCII HT, horizontal-tab (9)>
+	// <">        = <US-ASCII double-quote mark (34)>
+	// CRLF       = CR LF
+	// LWS        = [CRLF] 1*( SP | HT )
+	// TEXT       = <any OCTET except CTLs, but including LWS>
+	// separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
+	//              | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
+	// token      = 1*<any CHAR except CTLs or separators>
+	// qdtext     = <any TEXT except <">>
+
+	for c := 0; c < 256; c++ {
+		var t octetType
+		isCtl := c <= 31 || c == 127
+		isChar := 0 <= c && c <= 127
+		isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0
+		if strings.IndexRune(" \t\r\n", rune(c)) >= 0 {
+			t |= isSpace
+		}
+		if isChar && !isCtl && !isSeparator {
+			t |= isToken
+		}
+		octetTypes[c] = t
+	}
+}
+
+// ResponseChallenges returns a list of authorization challenges
+// for the given http Response. Challenges are only checked if
+// the response status code was a 401.
+func ResponseChallenges(resp *http.Response) []Challenge {
+	if resp.StatusCode == http.StatusUnauthorized {
+		// Parse the WWW-Authenticate Header and store the challenges
+		// on this endpoint object.
+		return parseAuthHeader(resp.Header)
+	}
+
+	return nil
+}
+
+func parseAuthHeader(header http.Header) []Challenge {
+	challenges := []Challenge{}
+	for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] {
+		v, p := parseValueAndParams(h)
+		if v != "" {
+			challenges = append(challenges, Challenge{Scheme: v, Parameters: p})
+		}
+	}
+	return challenges
+}
+
+func parseValueAndParams(header string) (value string, params map[string]string) {
+	params = make(map[string]string)
+	value, s := expectToken(header)
+	if value == "" {
+		return
+	}
+	value = strings.ToLower(value)
+	s = "," + skipSpace(s)
+	for strings.HasPrefix(s, ",") {
+		var pkey string
+		pkey, s = expectToken(skipSpace(s[1:]))
+		if pkey == "" {
+			return
+		}
+		if !strings.HasPrefix(s, "=") {
+			return
+		}
+		var pvalue string
+		pvalue, s = expectTokenOrQuoted(s[1:])
+		if pvalue == "" {
+			return
+		}
+		pkey = strings.ToLower(pkey)
+		params[pkey] = pvalue
+		s = skipSpace(s)
+	}
+	return
+}
+
+func skipSpace(s string) (rest string) {
+	i := 0
+	for ; i < len(s); i++ {
+		if octetTypes[s[i]]&isSpace == 0 {
+			break
+		}
+	}
+	return s[i:]
+}
+
+func expectToken(s string) (token, rest string) {
+	i := 0
+	for ; i < len(s); i++ {
+		if octetTypes[s[i]]&isToken == 0 {
+			break
+		}
+	}
+	return s[:i], s[i:]
+}
+
+func expectTokenOrQuoted(s string) (value string, rest string) {
+	if !strings.HasPrefix(s, "\"") {
+		return expectToken(s)
+	}
+	s = s[1:]
+	for i := 0; i < len(s); i++ {
+		switch s[i] {
+		case '"':
+			return s[:i], s[i+1:]
+		case '\\':
+			p := make([]byte, len(s)-1)
+			j := copy(p, s[:i])
+			escape := true
+			for i = i + 1; i < len(s); i++ {
+				b := s[i]
+				switch {
+				case escape:
+					escape = false
+					p[j] = b
+					j++
+				case b == '\\':
+					escape = true
+				case b == '"':
+					return string(p[:j]), s[i+1:]
+				default:
+					p[j] = b
+					j++
+				}
+			}
+			return "", ""
+		}
+	}
+	return "", ""
+}
diff --git a/vendor/github.com/docker/distribution/registry/client/blob_writer.go b/vendor/github.com/docker/distribution/registry/client/blob_writer.go
new file mode 100644
index 0000000..695bf85
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/client/blob_writer.go
@@ -0,0 +1,162 @@
+package client
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"time"
+
+	"github.com/docker/distribution"
+)
+
+type httpBlobUpload struct {
+	statter distribution.BlobStatter
+	client  *http.Client
+
+	uuid      string
+	startedAt time.Time
+
+	location string // always the last value of the location header.
+	offset   int64
+	closed   bool
+}
+
+func (hbu *httpBlobUpload) Reader() (io.ReadCloser, error) {
+	panic("Not implemented")
+}
+
+func (hbu *httpBlobUpload) handleErrorResponse(resp *http.Response) error {
+	if resp.StatusCode == http.StatusNotFound {
+		return distribution.ErrBlobUploadUnknown
+	}
+	return HandleErrorResponse(resp)
+}
+
+func (hbu *httpBlobUpload) ReadFrom(r io.Reader) (n int64, err error) {
+	req, err := http.NewRequest("PATCH", hbu.location, ioutil.NopCloser(r))
+	if err != nil {
+		return 0, err
+	}
+	defer req.Body.Close()
+
+	resp, err := hbu.client.Do(req)
+	if err != nil {
+		return 0, err
+	}
+
+	if !SuccessStatus(resp.StatusCode) {
+		return 0, hbu.handleErrorResponse(resp)
+	}
+
+	hbu.uuid = resp.Header.Get("Docker-Upload-UUID")
+	hbu.location, err = sanitizeLocation(resp.Header.Get("Location"), hbu.location)
+	if err != nil {
+		return 0, err
+	}
+	rng := resp.Header.Get("Range")
+	var start, end int64
+	if n, err := fmt.Sscanf(rng, "%d-%d", &start, &end); err != nil {
+		return 0, err
+	} else if n != 2 || end < start {
+		return 0, fmt.Errorf("bad range format: %s", rng)
+	}
+
+	return (end - start + 1), nil
+}
+
+func (hbu *httpBlobUpload) Write(p []byte) (n int, err error) {
+	req, err := http.NewRequest("PATCH", hbu.location, bytes.NewReader(p))
+	if err != nil {
+		return 0, err
+	}
+	req.Header.Set("Content-Range", fmt.Sprintf("%d-%d", hbu.offset, hbu.offset+int64(len(p)-1)))
+	req.Header.Set("Content-Length", fmt.Sprintf("%d", len(p)))
+	req.Header.Set("Content-Type", "application/octet-stream")
+
+	resp, err := hbu.client.Do(req)
+	if err != nil {
+		return 0, err
+	}
+
+	if !SuccessStatus(resp.StatusCode) {
+		return 0, hbu.handleErrorResponse(resp)
+	}
+
+	hbu.uuid = resp.Header.Get("Docker-Upload-UUID")
+	hbu.location, err = sanitizeLocation(resp.Header.Get("Location"), hbu.location)
+	if err != nil {
+		return 0, err
+	}
+	rng := resp.Header.Get("Range")
+	var start, end int
+	if n, err := fmt.Sscanf(rng, "%d-%d", &start, &end); err != nil {
+		return 0, err
+	} else if n != 2 || end < start {
+		return 0, fmt.Errorf("bad range format: %s", rng)
+	}
+
+	return (end - start + 1), nil
+}
+
+func (hbu *httpBlobUpload) Size() int64 {
+	return hbu.offset
+}
+
+func (hbu *httpBlobUpload) ID() string {
+	return hbu.uuid
+}
+
+func (hbu *httpBlobUpload) StartedAt() time.Time {
+	return hbu.startedAt
+}
+
+func (hbu *httpBlobUpload) Commit(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) {
+	// TODO(dmcgowan): Check if already finished, if so just fetch
+	req, err := http.NewRequest("PUT", hbu.location, nil)
+	if err != nil {
+		return distribution.Descriptor{}, err
+	}
+
+	values := req.URL.Query()
+	values.Set("digest", desc.Digest.String())
+	req.URL.RawQuery = values.Encode()
+
+	resp, err := hbu.client.Do(req)
+	if err != nil {
+		return distribution.Descriptor{}, err
+	}
+	defer resp.Body.Close()
+
+	if !SuccessStatus(resp.StatusCode) {
+		return distribution.Descriptor{}, hbu.handleErrorResponse(resp)
+	}
+
+	return hbu.statter.Stat(ctx, desc.Digest)
+}
+
+func (hbu *httpBlobUpload) Cancel(ctx context.Context) error {
+	req, err := http.NewRequest("DELETE", hbu.location, nil)
+	if err != nil {
+		return err
+	}
+	resp, err := hbu.client.Do(req)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode == http.StatusNotFound || SuccessStatus(resp.StatusCode) {
+		return nil
+	}
+	return hbu.handleErrorResponse(resp)
+}
+
+func (hbu *httpBlobUpload) Close() error {
+	hbu.closed = true
+	return nil
+}
diff --git a/vendor/github.com/docker/distribution/registry/client/errors.go b/vendor/github.com/docker/distribution/registry/client/errors.go
new file mode 100644
index 0000000..52d49d5
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/client/errors.go
@@ -0,0 +1,139 @@
+package client
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+
+	"github.com/docker/distribution/registry/api/errcode"
+	"github.com/docker/distribution/registry/client/auth/challenge"
+)
+
+// ErrNoErrorsInBody is returned when an HTTP response body parses to an empty
+// errcode.Errors slice.
+var ErrNoErrorsInBody = errors.New("no error details found in HTTP response body")
+
+// UnexpectedHTTPStatusError is returned when an unexpected HTTP status is
+// returned when making a registry api call.
+type UnexpectedHTTPStatusError struct {
+	Status string
+}
+
+func (e *UnexpectedHTTPStatusError) Error() string {
+	return fmt.Sprintf("received unexpected HTTP status: %s", e.Status)
+}
+
+// UnexpectedHTTPResponseError is returned when an expected HTTP status code
+// is returned, but the content was unexpected and failed to be parsed.
+type UnexpectedHTTPResponseError struct {
+	ParseErr   error
+	StatusCode int
+	Response   []byte
+}
+
+func (e *UnexpectedHTTPResponseError) Error() string {
+	return fmt.Sprintf("error parsing HTTP %d response body: %s: %q", e.StatusCode, e.ParseErr.Error(), string(e.Response))
+}
+
+func parseHTTPErrorResponse(statusCode int, r io.Reader) error {
+	var errors errcode.Errors
+	body, err := ioutil.ReadAll(r)
+	if err != nil {
+		return err
+	}
+
+	// For backward compatibility, handle irregularly formatted
+	// messages that contain a "details" field.
+	var detailsErr struct {
+		Details string `json:"details"`
+	}
+	err = json.Unmarshal(body, &detailsErr)
+	if err == nil && detailsErr.Details != "" {
+		switch statusCode {
+		case http.StatusUnauthorized:
+			return errcode.ErrorCodeUnauthorized.WithMessage(detailsErr.Details)
+		case http.StatusTooManyRequests:
+			return errcode.ErrorCodeTooManyRequests.WithMessage(detailsErr.Details)
+		default:
+			return errcode.ErrorCodeUnknown.WithMessage(detailsErr.Details)
+		}
+	}
+
+	if err := json.Unmarshal(body, &errors); err != nil {
+		return &UnexpectedHTTPResponseError{
+			ParseErr:   err,
+			StatusCode: statusCode,
+			Response:   body,
+		}
+	}
+
+	if len(errors) == 0 {
+		// If there was no error specified in the body, return
+		// UnexpectedHTTPResponseError.
+		return &UnexpectedHTTPResponseError{
+			ParseErr:   ErrNoErrorsInBody,
+			StatusCode: statusCode,
+			Response:   body,
+		}
+	}
+
+	return errors
+}
+
+func makeErrorList(err error) []error {
+	if errL, ok := err.(errcode.Errors); ok {
+		return []error(errL)
+	}
+	return []error{err}
+}
+
+func mergeErrors(err1, err2 error) error {
+	return errcode.Errors(append(makeErrorList(err1), makeErrorList(err2)...))
+}
+
+// HandleErrorResponse returns the error parsed from an HTTP response for an
+// unsuccessful HTTP response code (in the range 400 - 499 inclusive). An
+// UnexpectedHTTPStatusError is returned for response codes outside of the
+// expected range.
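+//
+// A typical call site (sketch; resp is an *http.Response from a registry
+// request):
+//
+//	if !SuccessStatus(resp.StatusCode) {
+//		return HandleErrorResponse(resp)
+//	}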
+func HandleErrorResponse(resp *http.Response) error { + if resp.StatusCode >= 400 && resp.StatusCode < 500 { + // Check for OAuth errors within the `WWW-Authenticate` header first + // See https://tools.ietf.org/html/rfc6750#section-3 + for _, c := range challenge.ResponseChallenges(resp) { + if c.Scheme == "bearer" { + var err errcode.Error + // codes defined at https://tools.ietf.org/html/rfc6750#section-3.1 + switch c.Parameters["error"] { + case "invalid_token": + err.Code = errcode.ErrorCodeUnauthorized + case "insufficient_scope": + err.Code = errcode.ErrorCodeDenied + default: + continue + } + if description := c.Parameters["error_description"]; description != "" { + err.Message = description + } else { + err.Message = err.Code.Message() + } + + return mergeErrors(err, parseHTTPErrorResponse(resp.StatusCode, resp.Body)) + } + } + err := parseHTTPErrorResponse(resp.StatusCode, resp.Body) + if uErr, ok := err.(*UnexpectedHTTPResponseError); ok && resp.StatusCode == 401 { + return errcode.ErrorCodeUnauthorized.WithDetail(uErr.Response) + } + return err + } + return &UnexpectedHTTPStatusError{Status: resp.Status} +} + +// SuccessStatus returns true if the argument is a successful HTTP response +// code (in the range 200 - 399 inclusive). +func SuccessStatus(status int) bool { + return status >= 200 && status <= 399 +} diff --git a/vendor/github.com/docker/distribution/registry/client/repository.go b/vendor/github.com/docker/distribution/registry/client/repository.go new file mode 100644 index 0000000..8bd2c3f --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/client/repository.go @@ -0,0 +1,853 @@ +package client + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/distribution/registry/storage/cache" + "github.com/docker/distribution/registry/storage/cache/memory" + "github.com/opencontainers/go-digest" +) + +// Registry provides an interface for calling Repositories, which returns a catalog of repositories. +type Registry interface { + Repositories(ctx context.Context, repos []string, last string) (n int, err error) +} + +// checkHTTPRedirect is a callback that can manipulate redirected HTTP +// requests. It is used to preserve Accept and Range headers. +func checkHTTPRedirect(req *http.Request, via []*http.Request) error { + if len(via) >= 10 { + return errors.New("stopped after 10 redirects") + } + + if len(via) > 0 { + for headerName, headerVals := range via[0].Header { + if headerName != "Accept" && headerName != "Range" { + continue + } + for _, val := range headerVals { + // Don't add to redirected request if redirected + // request already has a header with the same + // name and value. 
+				hasValue := false
+				for _, existingVal := range req.Header[headerName] {
+					if existingVal == val {
+						hasValue = true
+						break
+					}
+				}
+				if !hasValue {
+					req.Header.Add(headerName, val)
+				}
+			}
+		}
+	}
+
+	return nil
+}
+
+// NewRegistry creates a registry namespace which can be used to get a listing of repositories
+func NewRegistry(ctx context.Context, baseURL string, transport http.RoundTripper) (Registry, error) {
+	ub, err := v2.NewURLBuilderFromString(baseURL, false)
+	if err != nil {
+		return nil, err
+	}
+
+	client := &http.Client{
+		Transport:     transport,
+		Timeout:       1 * time.Minute,
+		CheckRedirect: checkHTTPRedirect,
+	}
+
+	return &registry{
+		client:  client,
+		ub:      ub,
+		context: ctx,
+	}, nil
+}
+
+type registry struct {
+	client  *http.Client
+	ub      *v2.URLBuilder
+	context context.Context
+}
+
+// Repositories returns a lexicographically sorted catalog given a base URL. The 'entries' slice will be filled up to the size
+// of the slice, starting at the value provided in 'last'. The number of entries will be returned along with io.EOF if there
+// are no more entries
+func (r *registry) Repositories(ctx context.Context, entries []string, last string) (int, error) {
+	var numFilled int
+	var returnErr error
+
+	values := buildCatalogValues(len(entries), last)
+	u, err := r.ub.BuildCatalogURL(values)
+	if err != nil {
+		return 0, err
+	}
+
+	resp, err := r.client.Get(u)
+	if err != nil {
+		return 0, err
+	}
+	defer resp.Body.Close()
+
+	if SuccessStatus(resp.StatusCode) {
+		var ctlg struct {
+			Repositories []string `json:"repositories"`
+		}
+		decoder := json.NewDecoder(resp.Body)
+
+		if err := decoder.Decode(&ctlg); err != nil {
+			return 0, err
+		}
+
+		for cnt := range ctlg.Repositories {
+			entries[cnt] = ctlg.Repositories[cnt]
+		}
+		numFilled = len(ctlg.Repositories)
+
+		link := resp.Header.Get("Link")
+		if link == "" {
+			returnErr = io.EOF
+		}
+	} else {
+		return 0, HandleErrorResponse(resp)
+	}
+
+	return numFilled, returnErr
+}
+
+// NewRepository creates a new Repository for the given repository name and base URL.
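+//
+// A minimal construction sketch (the registry URL and repository name are
+// illustrative):
+//
+//	named, _ := reference.WithName("library/busybox")
+//	repo, err := NewRepository(ctx, named, "https://registry.example.com", http.DefaultTransport)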
+func NewRepository(ctx context.Context, name reference.Named, baseURL string, transport http.RoundTripper) (distribution.Repository, error) { + ub, err := v2.NewURLBuilderFromString(baseURL, false) + if err != nil { + return nil, err + } + + client := &http.Client{ + Transport: transport, + CheckRedirect: checkHTTPRedirect, + // TODO(dmcgowan): create cookie jar + } + + return &repository{ + client: client, + ub: ub, + name: name, + context: ctx, + }, nil +} + +type repository struct { + client *http.Client + ub *v2.URLBuilder + context context.Context + name reference.Named +} + +func (r *repository) Named() reference.Named { + return r.name +} + +func (r *repository) Blobs(ctx context.Context) distribution.BlobStore { + statter := &blobStatter{ + name: r.name, + ub: r.ub, + client: r.client, + } + return &blobs{ + name: r.name, + ub: r.ub, + client: r.client, + statter: cache.NewCachedBlobStatter(memory.NewInMemoryBlobDescriptorCacheProvider(), statter), + } +} + +func (r *repository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { + // todo(richardscothern): options should be sent over the wire + return &manifests{ + name: r.name, + ub: r.ub, + client: r.client, + etags: make(map[string]string), + }, nil +} + +func (r *repository) Tags(ctx context.Context) distribution.TagService { + return &tags{ + client: r.client, + ub: r.ub, + context: r.context, + name: r.Named(), + } +} + +// tags implements remote tagging operations. +type tags struct { + client *http.Client + ub *v2.URLBuilder + context context.Context + name reference.Named +} + +// All returns all tags +func (t *tags) All(ctx context.Context) ([]string, error) { + var tags []string + + u, err := t.ub.BuildTagsURL(t.name) + if err != nil { + return tags, err + } + + for { + resp, err := t.client.Get(u) + if err != nil { + return tags, err + } + defer resp.Body.Close() + + if SuccessStatus(resp.StatusCode) { + b, err := ioutil.ReadAll(resp.Body) + if err != nil { + return tags, err + } + + tagsResponse := struct { + Tags []string `json:"tags"` + }{} + if err := json.Unmarshal(b, &tagsResponse); err != nil { + return tags, err + } + tags = append(tags, tagsResponse.Tags...) 
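+			// The registry may paginate large tag lists; when it does, it
+			// advertises the next page in an RFC 5988 Link header, which the
+			// loop below follows until no Link header is returned.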
+			if link := resp.Header.Get("Link"); link != "" {
+				u = strings.Trim(strings.Split(link, ";")[0], "<>")
+			} else {
+				return tags, nil
+			}
+		} else {
+			return tags, HandleErrorResponse(resp)
+		}
+	}
+}
+
+func descriptorFromResponse(response *http.Response) (distribution.Descriptor, error) {
+	desc := distribution.Descriptor{}
+	headers := response.Header
+
+	ctHeader := headers.Get("Content-Type")
+	if ctHeader == "" {
+		return distribution.Descriptor{}, errors.New("missing or empty Content-Type header")
+	}
+	desc.MediaType = ctHeader
+
+	digestHeader := headers.Get("Docker-Content-Digest")
+	if digestHeader == "" {
+		bytes, err := ioutil.ReadAll(response.Body)
+		if err != nil {
+			return distribution.Descriptor{}, err
+		}
+		_, desc, err := distribution.UnmarshalManifest(ctHeader, bytes)
+		if err != nil {
+			return distribution.Descriptor{}, err
+		}
+		return desc, nil
+	}
+
+	dgst, err := digest.Parse(digestHeader)
+	if err != nil {
+		return distribution.Descriptor{}, err
+	}
+	desc.Digest = dgst
+
+	lengthHeader := headers.Get("Content-Length")
+	if lengthHeader == "" {
+		return distribution.Descriptor{}, errors.New("missing or empty Content-Length header")
+	}
+	length, err := strconv.ParseInt(lengthHeader, 10, 64)
+	if err != nil {
+		return distribution.Descriptor{}, err
+	}
+	desc.Size = length
+
+	return desc, nil
+}
+
+// Get issues a HEAD request for a manifest against its named endpoint in order
+// to construct a descriptor for the tag. If the registry doesn't support HEADing
+// a manifest, fall back to GET.
+func (t *tags) Get(ctx context.Context, tag string) (distribution.Descriptor, error) {
+	ref, err := reference.WithTag(t.name, tag)
+	if err != nil {
+		return distribution.Descriptor{}, err
+	}
+	u, err := t.ub.BuildManifestURL(ref)
+	if err != nil {
+		return distribution.Descriptor{}, err
+	}
+
+	newRequest := func(method string) (*http.Response, error) {
+		req, err := http.NewRequest(method, u, nil)
+		if err != nil {
+			return nil, err
+		}
+
+		for _, t := range distribution.ManifestMediaTypes() {
+			req.Header.Add("Accept", t)
+		}
+		resp, err := t.client.Do(req)
+		return resp, err
+	}
+
+	resp, err := newRequest("HEAD")
+	if err != nil {
+		return distribution.Descriptor{}, err
+	}
+	defer resp.Body.Close()
+
+	switch {
+	case resp.StatusCode >= 200 && resp.StatusCode < 400:
+		return descriptorFromResponse(resp)
+	default:
+		// if the response is an error - there will be no body to decode.
+		// Issue a GET request:
+		//   - for data from a server that does not handle HEAD
+		//   - to get error details in case of a failure
+		resp, err = newRequest("GET")
+		if err != nil {
+			return distribution.Descriptor{}, err
+		}
+		defer resp.Body.Close()
+
+		if resp.StatusCode >= 200 && resp.StatusCode < 400 {
+			return descriptorFromResponse(resp)
+		}
+		return distribution.Descriptor{}, HandleErrorResponse(resp)
+	}
+}
+
+func (t *tags) Lookup(ctx context.Context, digest distribution.Descriptor) ([]string, error) {
+	panic("not implemented")
+}
+
+func (t *tags) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error {
+	panic("not implemented")
+}
+
+func (t *tags) Untag(ctx context.Context, tag string) error {
+	panic("not implemented")
+}
+
+type manifests struct {
+	name   reference.Named
+	ub     *v2.URLBuilder
+	client *http.Client
+	etags  map[string]string
+}
+
+func (ms *manifests) Exists(ctx context.Context, dgst digest.Digest) (bool, error) {
+	ref, err := reference.WithDigest(ms.name, dgst)
+	if err != nil {
+		return false, err
+	}
+	u, err := ms.ub.BuildManifestURL(ref)
+	if err != nil {
+		return false, err
+	}
+
+	resp, err := ms.client.Head(u)
+	if err != nil {
+		return false, err
+	}
+
+	if SuccessStatus(resp.StatusCode) {
+		return true, nil
+	} else if resp.StatusCode == http.StatusNotFound {
+		return false, nil
+	}
+	return false, HandleErrorResponse(resp)
+}
+
+// AddEtagToTag allows a client to supply an eTag to Get which will be
+// used for a conditional HTTP request. If the eTag matches, a nil manifest
+// and ErrManifestNotModified error will be returned. etag is automatically
+// quoted when added to this map.
+func AddEtagToTag(tag, etag string) distribution.ManifestServiceOption {
+	return etagOption{tag, etag}
+}
+
+type etagOption struct{ tag, etag string }
+
+func (o etagOption) Apply(ms distribution.ManifestService) error {
+	if ms, ok := ms.(*manifests); ok {
+		ms.etags[o.tag] = fmt.Sprintf(`"%s"`, o.etag)
+		return nil
+	}
+	return fmt.Errorf("etag option is a client-only option")
+}
+
+// ReturnContentDigest allows a client to set the content digest on
+// a successful request from the 'Docker-Content-Digest' header. The
+// returned digest represents the digest which the registry uses
+// to refer to the content and can be used to delete the content.
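+//
+// A minimal usage sketch (the tag and variables are illustrative):
+//
+//	var dgst digest.Digest
+//	m, err := ms.Get(ctx, "", distribution.WithTag("latest"), ReturnContentDigest(&dgst))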
+func ReturnContentDigest(dgst *digest.Digest) distribution.ManifestServiceOption { + return contentDigestOption{dgst} +} + +type contentDigestOption struct{ digest *digest.Digest } + +func (o contentDigestOption) Apply(ms distribution.ManifestService) error { + return nil +} + +func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { + var ( + digestOrTag string + ref reference.Named + err error + contentDgst *digest.Digest + ) + + for _, option := range options { + if opt, ok := option.(distribution.WithTagOption); ok { + digestOrTag = opt.Tag + ref, err = reference.WithTag(ms.name, opt.Tag) + if err != nil { + return nil, err + } + } else if opt, ok := option.(contentDigestOption); ok { + contentDgst = opt.digest + } else { + err := option.Apply(ms) + if err != nil { + return nil, err + } + } + } + + if digestOrTag == "" { + digestOrTag = dgst.String() + ref, err = reference.WithDigest(ms.name, dgst) + if err != nil { + return nil, err + } + } + + u, err := ms.ub.BuildManifestURL(ref) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("GET", u, nil) + if err != nil { + return nil, err + } + + for _, t := range distribution.ManifestMediaTypes() { + req.Header.Add("Accept", t) + } + + if _, ok := ms.etags[digestOrTag]; ok { + req.Header.Set("If-None-Match", ms.etags[digestOrTag]) + } + + resp, err := ms.client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + if resp.StatusCode == http.StatusNotModified { + return nil, distribution.ErrManifestNotModified + } else if SuccessStatus(resp.StatusCode) { + if contentDgst != nil { + dgst, err := digest.Parse(resp.Header.Get("Docker-Content-Digest")) + if err == nil { + *contentDgst = dgst + } + } + mt := resp.Header.Get("Content-Type") + body, err := ioutil.ReadAll(resp.Body) + + if err != nil { + return nil, err + } + m, _, err := distribution.UnmarshalManifest(mt, body) + if err != nil { + return nil, err + } + return m, nil + } + return nil, HandleErrorResponse(resp) +} + +// Put puts a manifest. A tag can be specified using an options parameter which uses some shared state to hold the +// tag name in order to build the correct upload URL. 
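+//
+// A minimal usage sketch (the tag is illustrative); without WithTag the
+// manifest is put under its canonical digest instead:
+//
+//	dgst, err := ms.Put(ctx, m, distribution.WithTag("v1.0"))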
+func (ms *manifests) Put(ctx context.Context, m distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) { + ref := ms.name + var tagged bool + + for _, option := range options { + if opt, ok := option.(distribution.WithTagOption); ok { + var err error + ref, err = reference.WithTag(ref, opt.Tag) + if err != nil { + return "", err + } + tagged = true + } else { + err := option.Apply(ms) + if err != nil { + return "", err + } + } + } + mediaType, p, err := m.Payload() + if err != nil { + return "", err + } + + if !tagged { + // generate a canonical digest and Put by digest + _, d, err := distribution.UnmarshalManifest(mediaType, p) + if err != nil { + return "", err + } + ref, err = reference.WithDigest(ref, d.Digest) + if err != nil { + return "", err + } + } + + manifestURL, err := ms.ub.BuildManifestURL(ref) + if err != nil { + return "", err + } + + putRequest, err := http.NewRequest("PUT", manifestURL, bytes.NewReader(p)) + if err != nil { + return "", err + } + + putRequest.Header.Set("Content-Type", mediaType) + + resp, err := ms.client.Do(putRequest) + if err != nil { + return "", err + } + defer resp.Body.Close() + + if SuccessStatus(resp.StatusCode) { + dgstHeader := resp.Header.Get("Docker-Content-Digest") + dgst, err := digest.Parse(dgstHeader) + if err != nil { + return "", err + } + + return dgst, nil + } + + return "", HandleErrorResponse(resp) +} + +func (ms *manifests) Delete(ctx context.Context, dgst digest.Digest) error { + ref, err := reference.WithDigest(ms.name, dgst) + if err != nil { + return err + } + u, err := ms.ub.BuildManifestURL(ref) + if err != nil { + return err + } + req, err := http.NewRequest("DELETE", u, nil) + if err != nil { + return err + } + + resp, err := ms.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + if SuccessStatus(resp.StatusCode) { + return nil + } + return HandleErrorResponse(resp) +} + +// todo(richardscothern): Restore interface and implementation with merge of #1050 +/*func (ms *manifests) Enumerate(ctx context.Context, manifests []distribution.Manifest, last distribution.Manifest) (n int, err error) { + panic("not supported") +}*/ + +type blobs struct { + name reference.Named + ub *v2.URLBuilder + client *http.Client + + statter distribution.BlobDescriptorService + distribution.BlobDeleter +} + +func sanitizeLocation(location, base string) (string, error) { + baseURL, err := url.Parse(base) + if err != nil { + return "", err + } + + locationURL, err := url.Parse(location) + if err != nil { + return "", err + } + + return baseURL.ResolveReference(locationURL).String(), nil +} + +func (bs *blobs) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + return bs.statter.Stat(ctx, dgst) + +} + +func (bs *blobs) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { + reader, err := bs.Open(ctx, dgst) + if err != nil { + return nil, err + } + defer reader.Close() + + return ioutil.ReadAll(reader) +} + +func (bs *blobs) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { + ref, err := reference.WithDigest(bs.name, dgst) + if err != nil { + return nil, err + } + blobURL, err := bs.ub.BuildBlobURL(ref) + if err != nil { + return nil, err + } + + return transport.NewHTTPReadSeeker(bs.client, blobURL, + func(resp *http.Response) error { + if resp.StatusCode == http.StatusNotFound { + return distribution.ErrBlobUnknown + } + return HandleErrorResponse(resp) + }), nil +} + +func (bs *blobs) ServeBlob(ctx 
context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { + panic("not implemented") +} + +func (bs *blobs) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { + writer, err := bs.Create(ctx) + if err != nil { + return distribution.Descriptor{}, err + } + dgstr := digest.Canonical.Digester() + n, err := io.Copy(writer, io.TeeReader(bytes.NewReader(p), dgstr.Hash())) + if err != nil { + return distribution.Descriptor{}, err + } + if n < int64(len(p)) { + return distribution.Descriptor{}, fmt.Errorf("short copy: wrote %d of %d", n, len(p)) + } + + desc := distribution.Descriptor{ + MediaType: mediaType, + Size: int64(len(p)), + Digest: dgstr.Digest(), + } + + return writer.Commit(ctx, desc) +} + +type optionFunc func(interface{}) error + +func (f optionFunc) Apply(v interface{}) error { + return f(v) +} + +// WithMountFrom returns a BlobCreateOption which designates that the blob should be +// mounted from the given canonical reference. +func WithMountFrom(ref reference.Canonical) distribution.BlobCreateOption { + return optionFunc(func(v interface{}) error { + opts, ok := v.(*distribution.CreateOptions) + if !ok { + return fmt.Errorf("unexpected options type: %T", v) + } + + opts.Mount.ShouldMount = true + opts.Mount.From = ref + + return nil + }) +} + +func (bs *blobs) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { + var opts distribution.CreateOptions + + for _, option := range options { + err := option.Apply(&opts) + if err != nil { + return nil, err + } + } + + var values []url.Values + + if opts.Mount.ShouldMount { + values = append(values, url.Values{"from": {opts.Mount.From.Name()}, "mount": {opts.Mount.From.Digest().String()}}) + } + + u, err := bs.ub.BuildBlobUploadURL(bs.name, values...) 
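+	// When a mount was requested, the upload URL built above carries the
+	// from=<source repository> and mount=<digest> parameters; a 201 Created
+	// response below means the registry mounted the blob directly and no
+	// upload session was opened.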
+ if err != nil { + return nil, err + } + + resp, err := bs.client.Post(u, "", nil) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + switch resp.StatusCode { + case http.StatusCreated: + desc, err := bs.statter.Stat(ctx, opts.Mount.From.Digest()) + if err != nil { + return nil, err + } + return nil, distribution.ErrBlobMounted{From: opts.Mount.From, Descriptor: desc} + case http.StatusAccepted: + // TODO(dmcgowan): Check for invalid UUID + uuid := resp.Header.Get("Docker-Upload-UUID") + location, err := sanitizeLocation(resp.Header.Get("Location"), u) + if err != nil { + return nil, err + } + + return &httpBlobUpload{ + statter: bs.statter, + client: bs.client, + uuid: uuid, + startedAt: time.Now(), + location: location, + }, nil + default: + return nil, HandleErrorResponse(resp) + } +} + +func (bs *blobs) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { + panic("not implemented") +} + +func (bs *blobs) Delete(ctx context.Context, dgst digest.Digest) error { + return bs.statter.Clear(ctx, dgst) +} + +type blobStatter struct { + name reference.Named + ub *v2.URLBuilder + client *http.Client +} + +func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + ref, err := reference.WithDigest(bs.name, dgst) + if err != nil { + return distribution.Descriptor{}, err + } + u, err := bs.ub.BuildBlobURL(ref) + if err != nil { + return distribution.Descriptor{}, err + } + + resp, err := bs.client.Head(u) + if err != nil { + return distribution.Descriptor{}, err + } + defer resp.Body.Close() + + if SuccessStatus(resp.StatusCode) { + lengthHeader := resp.Header.Get("Content-Length") + if lengthHeader == "" { + return distribution.Descriptor{}, fmt.Errorf("missing content-length header for request: %s", u) + } + + length, err := strconv.ParseInt(lengthHeader, 10, 64) + if err != nil { + return distribution.Descriptor{}, fmt.Errorf("error parsing content-length: %v", err) + } + + return distribution.Descriptor{ + MediaType: resp.Header.Get("Content-Type"), + Size: length, + Digest: dgst, + }, nil + } else if resp.StatusCode == http.StatusNotFound { + return distribution.Descriptor{}, distribution.ErrBlobUnknown + } + return distribution.Descriptor{}, HandleErrorResponse(resp) +} + +func buildCatalogValues(maxEntries int, last string) url.Values { + values := url.Values{} + + if maxEntries > 0 { + values.Add("n", strconv.Itoa(maxEntries)) + } + + if last != "" { + values.Add("last", last) + } + + return values +} + +func (bs *blobStatter) Clear(ctx context.Context, dgst digest.Digest) error { + ref, err := reference.WithDigest(bs.name, dgst) + if err != nil { + return err + } + blobURL, err := bs.ub.BuildBlobURL(ref) + if err != nil { + return err + } + + req, err := http.NewRequest("DELETE", blobURL, nil) + if err != nil { + return err + } + + resp, err := bs.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + if SuccessStatus(resp.StatusCode) { + return nil + } + return HandleErrorResponse(resp) +} + +func (bs *blobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { + return nil +} diff --git a/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go b/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go new file mode 100644 index 0000000..e5ff09d --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go @@ -0,0 +1,251 @@ +package transport + +import 
(
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"os"
+	"regexp"
+	"strconv"
+)
+
+var (
+	contentRangeRegexp = regexp.MustCompile(`bytes ([0-9]+)-([0-9]+)/([0-9]+|\*)`)
+
+	// ErrWrongCodeForByteRange is returned if the client sends a request
+	// with a Range header but the server returns a 2xx or 3xx code other
+	// than 206 Partial Content.
+	ErrWrongCodeForByteRange = errors.New("expected HTTP 206 from byte range request")
+)
+
+// ReadSeekCloser combines io.ReadSeeker with io.Closer.
+type ReadSeekCloser interface {
+	io.ReadSeeker
+	io.Closer
+}
+
+// NewHTTPReadSeeker handles reading from an HTTP endpoint using a GET
+// request. When seeking and starting a read from a non-zero offset,
+// a "Range" header will be added which sets the offset.
+// TODO(dmcgowan): Move this into a separate utility package
+func NewHTTPReadSeeker(client *http.Client, url string, errorHandler func(*http.Response) error) ReadSeekCloser {
+	return &httpReadSeeker{
+		client:       client,
+		url:          url,
+		errorHandler: errorHandler,
+	}
+}
+
+type httpReadSeeker struct {
+	client *http.Client
+	url    string
+
+	// errorHandler creates an error from an unsuccessful HTTP response.
+	// This allows the error to be created with the HTTP response body
+	// without leaking the body through a returned error.
+	errorHandler func(*http.Response) error
+
+	size int64
+
+	// rc is the remote read closer.
+	rc io.ReadCloser
+	// readerOffset tracks the offset as of the last read.
+	readerOffset int64
+	// seekOffset allows Seek to override the offset. Seek changes
+	// seekOffset instead of changing readerOffset directly so that
+	// connection resets can be delayed and possibly avoided if the
+	// seek is undone (i.e. seeking to the end and then back to the
+	// beginning).
+	seekOffset int64
+	err        error
+}
+
+func (hrs *httpReadSeeker) Read(p []byte) (n int, err error) {
+	if hrs.err != nil {
+		return 0, hrs.err
+	}
+
+	// If we sought to a different position, we need to reset the
+	// connection. This logic is here instead of Seek so that if
+	// a seek is undone before the next read, the connection doesn't
+	// need to be closed and reopened. A common example of this is
+	// seeking to the end to determine the length, and then seeking
+	// back to the original position.
+	if hrs.readerOffset != hrs.seekOffset {
+		hrs.reset()
+	}
+
+	hrs.readerOffset = hrs.seekOffset
+
+	rd, err := hrs.reader()
+	if err != nil {
+		return 0, err
+	}
+
+	n, err = rd.Read(p)
+	hrs.seekOffset += int64(n)
+	hrs.readerOffset += int64(n)
+
+	return n, err
+}
+
+func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) {
+	if hrs.err != nil {
+		return 0, hrs.err
+	}
+
+	lastReaderOffset := hrs.readerOffset
+
+	if whence == os.SEEK_SET && hrs.rc == nil {
+		// If no request has been made yet, and we are seeking to an
+		// absolute position, set the read offset as well to avoid an
+		// unnecessary request.
+ hrs.readerOffset = offset + } + + _, err := hrs.reader() + if err != nil { + hrs.readerOffset = lastReaderOffset + return 0, err + } + + newOffset := hrs.seekOffset + + switch whence { + case os.SEEK_CUR: + newOffset += offset + case os.SEEK_END: + if hrs.size < 0 { + return 0, errors.New("content length not known") + } + newOffset = hrs.size + offset + case os.SEEK_SET: + newOffset = offset + } + + if newOffset < 0 { + err = errors.New("cannot seek to negative position") + } else { + hrs.seekOffset = newOffset + } + + return hrs.seekOffset, err +} + +func (hrs *httpReadSeeker) Close() error { + if hrs.err != nil { + return hrs.err + } + + // close and release reader chain + if hrs.rc != nil { + hrs.rc.Close() + } + + hrs.rc = nil + + hrs.err = errors.New("httpLayer: closed") + + return nil +} + +func (hrs *httpReadSeeker) reset() { + if hrs.err != nil { + return + } + if hrs.rc != nil { + hrs.rc.Close() + hrs.rc = nil + } +} + +func (hrs *httpReadSeeker) reader() (io.Reader, error) { + if hrs.err != nil { + return nil, hrs.err + } + + if hrs.rc != nil { + return hrs.rc, nil + } + + req, err := http.NewRequest("GET", hrs.url, nil) + if err != nil { + return nil, err + } + + if hrs.readerOffset > 0 { + // If we are at different offset, issue a range request from there. + req.Header.Add("Range", fmt.Sprintf("bytes=%d-", hrs.readerOffset)) + // TODO: get context in here + // context.GetLogger(hrs.context).Infof("Range: %s", req.Header.Get("Range")) + } + + req.Header.Add("Accept-Encoding", "identity") + resp, err := hrs.client.Do(req) + if err != nil { + return nil, err + } + + // Normally would use client.SuccessStatus, but that would be a cyclic + // import + if resp.StatusCode >= 200 && resp.StatusCode <= 399 { + if hrs.readerOffset > 0 { + if resp.StatusCode != http.StatusPartialContent { + return nil, ErrWrongCodeForByteRange + } + + contentRange := resp.Header.Get("Content-Range") + if contentRange == "" { + return nil, errors.New("no Content-Range header found in HTTP 206 response") + } + + submatches := contentRangeRegexp.FindStringSubmatch(contentRange) + if len(submatches) < 4 { + return nil, fmt.Errorf("could not parse Content-Range header: %s", contentRange) + } + + startByte, err := strconv.ParseUint(submatches[1], 10, 64) + if err != nil { + return nil, fmt.Errorf("could not parse start of range in Content-Range header: %s", contentRange) + } + + if startByte != uint64(hrs.readerOffset) { + return nil, fmt.Errorf("received Content-Range starting at offset %d instead of requested %d", startByte, hrs.readerOffset) + } + + endByte, err := strconv.ParseUint(submatches[2], 10, 64) + if err != nil { + return nil, fmt.Errorf("could not parse end of range in Content-Range header: %s", contentRange) + } + + if submatches[3] == "*" { + hrs.size = -1 + } else { + size, err := strconv.ParseUint(submatches[3], 10, 64) + if err != nil { + return nil, fmt.Errorf("could not parse total size in Content-Range header: %s", contentRange) + } + + if endByte+1 != size { + return nil, fmt.Errorf("range in Content-Range stops before the end of the content: %s", contentRange) + } + + hrs.size = int64(size) + } + } else if resp.StatusCode == http.StatusOK { + hrs.size = resp.ContentLength + } else { + hrs.size = -1 + } + hrs.rc = resp.Body + } else { + defer resp.Body.Close() + if hrs.errorHandler != nil { + return nil, hrs.errorHandler(resp) + } + return nil, fmt.Errorf("unexpected status resolving reader: %v", resp.Status) + } + + return hrs.rc, nil +} diff --git 
a/vendor/github.com/docker/distribution/registry/client/transport/transport.go b/vendor/github.com/docker/distribution/registry/client/transport/transport.go
new file mode 100644
index 0000000..30e45fa
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/client/transport/transport.go
@@ -0,0 +1,147 @@
+package transport
+
+import (
+	"io"
+	"net/http"
+	"sync"
+)
+
+// RequestModifier represents an object which will do an in-place
+// modification of an HTTP request.
+type RequestModifier interface {
+	ModifyRequest(*http.Request) error
+}
+
+type headerModifier http.Header
+
+// NewHeaderRequestModifier returns a new RequestModifier which will
+// add the given headers to a request.
+func NewHeaderRequestModifier(header http.Header) RequestModifier {
+	return headerModifier(header)
+}
+
+func (h headerModifier) ModifyRequest(req *http.Request) error {
+	for k, s := range http.Header(h) {
+		req.Header[k] = append(req.Header[k], s...)
+	}
+
+	return nil
+}
+
+// NewTransport creates a new transport which will apply modifiers to
+// the request on a RoundTrip call.
+func NewTransport(base http.RoundTripper, modifiers ...RequestModifier) http.RoundTripper {
+	return &transport{
+		Modifiers: modifiers,
+		Base:      base,
+	}
+}
+
+// transport is an http.RoundTripper that makes HTTP requests after
+// copying and modifying the request.
+type transport struct {
+	Modifiers []RequestModifier
+	Base      http.RoundTripper
+
+	mu     sync.Mutex                      // guards modReq
+	modReq map[*http.Request]*http.Request // original -> modified
+}
+
+// RoundTrip applies the registered request modifiers to a clone of the
+// incoming request and performs the round trip using the base transport.
+func (t *transport) RoundTrip(req *http.Request) (*http.Response, error) {
+	req2 := cloneRequest(req)
+	for _, modifier := range t.Modifiers {
+		if err := modifier.ModifyRequest(req2); err != nil {
+			return nil, err
+		}
+	}
+
+	t.setModReq(req, req2)
+	res, err := t.base().RoundTrip(req2)
+	if err != nil {
+		t.setModReq(req, nil)
+		return nil, err
+	}
+	res.Body = &onEOFReader{
+		rc: res.Body,
+		fn: func() { t.setModReq(req, nil) },
+	}
+	return res, nil
+}
+
+// CancelRequest cancels an in-flight request by closing its connection.
+func (t *transport) CancelRequest(req *http.Request) {
+	type canceler interface {
+		CancelRequest(*http.Request)
+	}
+	if cr, ok := t.base().(canceler); ok {
+		t.mu.Lock()
+		modReq := t.modReq[req]
+		delete(t.modReq, req)
+		t.mu.Unlock()
+		cr.CancelRequest(modReq)
+	}
+}
+
+func (t *transport) base() http.RoundTripper {
+	if t.Base != nil {
+		return t.Base
+	}
+	return http.DefaultTransport
+}
+
+func (t *transport) setModReq(orig, mod *http.Request) {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	if t.modReq == nil {
+		t.modReq = make(map[*http.Request]*http.Request)
+	}
+	if mod == nil {
+		delete(t.modReq, orig)
+	} else {
+		t.modReq[orig] = mod
+	}
+}
+
+// cloneRequest returns a clone of the provided *http.Request.
+// The clone is a shallow copy of the struct and its Header map.
+func cloneRequest(r *http.Request) *http.Request {
+	// shallow copy of the struct
+	r2 := new(http.Request)
+	*r2 = *r
+	// deep copy of the Header
+	r2.Header = make(http.Header, len(r.Header))
+	for k, s := range r.Header {
+		r2.Header[k] = append([]string(nil), s...)
+	}
+
+	return r2
+}
+
+type onEOFReader struct {
+	rc io.ReadCloser
+	fn func()
+}
+
+func (r *onEOFReader) Read(p []byte) (n int, err error) {
+	n, err = r.rc.Read(p)
+	if err == io.EOF {
+		r.runFunc()
+	}
+	return
+}
+
+func (r *onEOFReader) Close() error {
+	err := r.rc.Close()
+	r.runFunc()
+	return err
+}
+
+func (r *onEOFReader) runFunc() {
+	if fn := r.fn; fn != nil {
+		fn()
+		r.fn = nil
+	}
+}
diff --git a/vendor/github.com/docker/distribution/registry/storage/cache/cache.go b/vendor/github.com/docker/distribution/registry/storage/cache/cache.go
new file mode 100644
index 0000000..10a3909
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/storage/cache/cache.go
@@ -0,0 +1,35 @@
+// Package cache provides facilities to speed up access to the storage
+// backend.
+package cache
+
+import (
+	"fmt"
+
+	"github.com/docker/distribution"
+)
+
+// BlobDescriptorCacheProvider provides repository scoped
+// BlobDescriptorService cache instances and a global descriptor cache.
+type BlobDescriptorCacheProvider interface {
+	distribution.BlobDescriptorService
+
+	RepositoryScoped(repo string) (distribution.BlobDescriptorService, error)
+}
+
+// ValidateDescriptor provides a helper function to ensure that caches have
+// common criteria for admitting descriptors.
+func ValidateDescriptor(desc distribution.Descriptor) error {
+	if err := desc.Digest.Validate(); err != nil {
+		return err
+	}
+
+	if desc.Size < 0 {
+		return fmt.Errorf("cache: invalid length in descriptor: %v < 0", desc.Size)
+	}
+
+	if desc.MediaType == "" {
+		return fmt.Errorf("cache: empty mediatype on descriptor: %v", desc)
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go b/vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go
new file mode 100644
index 0000000..cdc34f5
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go
@@ -0,0 +1,121 @@
+package cache
+
+import (
+	"context"
+
+	"github.com/docker/distribution"
+	"github.com/opencontainers/go-digest"
+)
+
+// Metrics is used to hold metric counters
+// related to the number of times a cache was
+// hit or missed.
+type Metrics struct {
+	Requests uint64
+	Hits     uint64
+	Misses   uint64
+}
+
+// Logger can be provided on the MetricsTracker to log errors.
+//
+// Usually, this is just a proxy to dcontext.GetLogger.
+type Logger interface {
+	Errorf(format string, args ...interface{})
+}
+
+// MetricsTracker represents a metric tracker
+// which simply counts the number of hits and misses.
+type MetricsTracker interface {
+	Hit()
+	Miss()
+	Metrics() Metrics
+	Logger(context.Context) Logger
+}
+
+type cachedBlobStatter struct {
+	cache   distribution.BlobDescriptorService
+	backend distribution.BlobDescriptorService
+	tracker MetricsTracker
+}
+
+// NewCachedBlobStatter creates a new statter which prefers a cache and
+// falls back to a backend.
+func NewCachedBlobStatter(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService) distribution.BlobDescriptorService {
+	return &cachedBlobStatter{
+		cache:   cache,
+		backend: backend,
+	}
+}
+
+// NewCachedBlobStatterWithMetrics creates a new statter which prefers a cache and
+// falls back to a backend. Hits and misses will be sent to the tracker.
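+//
+// A minimal wiring sketch from a consuming package (backend and tracker are
+// illustrative implementations of BlobDescriptorService and MetricsTracker):
+//
+//	statter := cache.NewCachedBlobStatterWithMetrics(
+//		memory.NewInMemoryBlobDescriptorCacheProvider(), backend, tracker)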
+func NewCachedBlobStatterWithMetrics(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService, tracker MetricsTracker) distribution.BlobStatter {
+	return &cachedBlobStatter{
+		cache:   cache,
+		backend: backend,
+		tracker: tracker,
+	}
+}
+
+func (cbds *cachedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
+	desc, err := cbds.cache.Stat(ctx, dgst)
+	if err != nil {
+		if err != distribution.ErrBlobUnknown {
+			logErrorf(ctx, cbds.tracker, "error retrieving descriptor from cache: %v", err)
+		}
+
+		goto fallback
+	}
+
+	if cbds.tracker != nil {
+		cbds.tracker.Hit()
+	}
+	return desc, nil
+fallback:
+	if cbds.tracker != nil {
+		cbds.tracker.Miss()
+	}
+	desc, err = cbds.backend.Stat(ctx, dgst)
+	if err != nil {
+		return desc, err
+	}
+
+	if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil {
+		logErrorf(ctx, cbds.tracker, "error adding descriptor %v to cache: %v", desc.Digest, err)
+	}
+
+	return desc, err
+}
+
+func (cbds *cachedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) error {
+	err := cbds.cache.Clear(ctx, dgst)
+	if err != nil {
+		return err
+	}
+
+	err = cbds.backend.Clear(ctx, dgst)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+func (cbds *cachedBlobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
+	if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil {
+		logErrorf(ctx, cbds.tracker, "error adding descriptor %v to cache: %v", desc.Digest, err)
+	}
+	return nil
+}
+
+func logErrorf(ctx context.Context, tracker MetricsTracker, format string, args ...interface{}) {
+	if tracker == nil {
+		return
+	}
+
+	logger := tracker.Logger(ctx)
+	if logger == nil {
+		return
+	}
+	logger.Errorf(format, args...)
+}
diff --git a/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go b/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go
new file mode 100644
index 0000000..42d94d9
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go
@@ -0,0 +1,179 @@
+package memory
+
+import (
+	"context"
+	"sync"
+
+	"github.com/docker/distribution"
+	"github.com/docker/distribution/reference"
+	"github.com/docker/distribution/registry/storage/cache"
+	"github.com/opencontainers/go-digest"
+)
+
+type inMemoryBlobDescriptorCacheProvider struct {
+	global       *mapBlobDescriptorCache
+	repositories map[string]*mapBlobDescriptorCache
+	mu           sync.RWMutex
+}
+
+// NewInMemoryBlobDescriptorCacheProvider returns a new map-based cache for
+// storing blob descriptor data.
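+//
+// A minimal usage sketch (the repository name is illustrative):
+//
+//	provider := NewInMemoryBlobDescriptorCacheProvider()
+//	scoped, err := provider.RepositoryScoped("library/ubuntu")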
+func NewInMemoryBlobDescriptorCacheProvider() cache.BlobDescriptorCacheProvider { + return &inMemoryBlobDescriptorCacheProvider{ + global: newMapBlobDescriptorCache(), + repositories: make(map[string]*mapBlobDescriptorCache), + } +} + +func (imbdcp *inMemoryBlobDescriptorCacheProvider) RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) { + if _, err := reference.ParseNormalizedNamed(repo); err != nil { + return nil, err + } + + imbdcp.mu.RLock() + defer imbdcp.mu.RUnlock() + + return &repositoryScopedInMemoryBlobDescriptorCache{ + repo: repo, + parent: imbdcp, + repository: imbdcp.repositories[repo], + }, nil +} + +func (imbdcp *inMemoryBlobDescriptorCacheProvider) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + return imbdcp.global.Stat(ctx, dgst) +} + +func (imbdcp *inMemoryBlobDescriptorCacheProvider) Clear(ctx context.Context, dgst digest.Digest) error { + return imbdcp.global.Clear(ctx, dgst) +} + +func (imbdcp *inMemoryBlobDescriptorCacheProvider) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { + _, err := imbdcp.Stat(ctx, dgst) + if err == distribution.ErrBlobUnknown { + + if dgst.Algorithm() != desc.Digest.Algorithm() && dgst != desc.Digest { + // if the digests differ, set the other canonical mapping + if err := imbdcp.global.SetDescriptor(ctx, desc.Digest, desc); err != nil { + return err + } + } + + // unknown, just set it + return imbdcp.global.SetDescriptor(ctx, dgst, desc) + } + + // we already know it, do nothing + return err +} + +// repositoryScopedInMemoryBlobDescriptorCache provides the request scoped +// repository cache. Instances are not thread-safe but the delegated +// operations are. +type repositoryScopedInMemoryBlobDescriptorCache struct { + repo string + parent *inMemoryBlobDescriptorCacheProvider // allows lazy allocation of repo's map + repository *mapBlobDescriptorCache +} + +func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + rsimbdcp.parent.mu.Lock() + repo := rsimbdcp.repository + rsimbdcp.parent.mu.Unlock() + + if repo == nil { + return distribution.Descriptor{}, distribution.ErrBlobUnknown + } + + return repo.Stat(ctx, dgst) +} + +func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Clear(ctx context.Context, dgst digest.Digest) error { + rsimbdcp.parent.mu.Lock() + repo := rsimbdcp.repository + rsimbdcp.parent.mu.Unlock() + + if repo == nil { + return distribution.ErrBlobUnknown + } + + return repo.Clear(ctx, dgst) +} + +func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { + rsimbdcp.parent.mu.Lock() + repo := rsimbdcp.repository + if repo == nil { + // allocate map since we are setting it now. + var ok bool + // have to read back value since we may have allocated elsewhere. + repo, ok = rsimbdcp.parent.repositories[rsimbdcp.repo] + if !ok { + repo = newMapBlobDescriptorCache() + rsimbdcp.parent.repositories[rsimbdcp.repo] = repo + } + rsimbdcp.repository = repo + } + rsimbdcp.parent.mu.Unlock() + + if err := repo.SetDescriptor(ctx, dgst, desc); err != nil { + return err + } + + return rsimbdcp.parent.SetDescriptor(ctx, dgst, desc) +} + +// mapBlobDescriptorCache provides a simple map-based implementation of the +// descriptor cache. 
+type mapBlobDescriptorCache struct { + descriptors map[digest.Digest]distribution.Descriptor + mu sync.RWMutex +} + +var _ distribution.BlobDescriptorService = &mapBlobDescriptorCache{} + +func newMapBlobDescriptorCache() *mapBlobDescriptorCache { + return &mapBlobDescriptorCache{ + descriptors: make(map[digest.Digest]distribution.Descriptor), + } +} + +func (mbdc *mapBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + if err := dgst.Validate(); err != nil { + return distribution.Descriptor{}, err + } + + mbdc.mu.RLock() + defer mbdc.mu.RUnlock() + + desc, ok := mbdc.descriptors[dgst] + if !ok { + return distribution.Descriptor{}, distribution.ErrBlobUnknown + } + + return desc, nil +} + +func (mbdc *mapBlobDescriptorCache) Clear(ctx context.Context, dgst digest.Digest) error { + mbdc.mu.Lock() + defer mbdc.mu.Unlock() + + delete(mbdc.descriptors, dgst) + return nil +} + +func (mbdc *mapBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { + if err := dgst.Validate(); err != nil { + return err + } + + if err := cache.ValidateDescriptor(desc); err != nil { + return err + } + + mbdc.mu.Lock() + defer mbdc.mu.Unlock() + + mbdc.descriptors[dgst] = desc + return nil +} diff --git a/vendor/github.com/docker/distribution/tags.go b/vendor/github.com/docker/distribution/tags.go new file mode 100644 index 0000000..f22df2b --- /dev/null +++ b/vendor/github.com/docker/distribution/tags.go @@ -0,0 +1,27 @@ +package distribution + +import ( + "context" +) + +// TagService provides access to information about tagged objects. +type TagService interface { + // Get retrieves the descriptor identified by the tag. Some + // implementations may differentiate between "trusted" tags and + // "untrusted" tags. If a tag is "untrusted", the mapping will be returned + // as an ErrTagUntrusted error, with the target descriptor. + Get(ctx context.Context, tag string) (Descriptor, error) + + // Tag associates the tag with the provided descriptor, updating the + // current association, if needed. + Tag(ctx context.Context, tag string, desc Descriptor) error + + // Untag removes the given tag association + Untag(ctx context.Context, tag string) error + + // All returns the set of tags managed by this tag service + All(ctx context.Context) ([]string, error) + + // Lookup returns the set of tags referencing the given digest. 
+ Lookup(ctx context.Context, digest Descriptor) ([]string, error) +} diff --git a/vendor/github.com/docker/distribution/vendor.conf b/vendor/github.com/docker/distribution/vendor.conf new file mode 100644 index 0000000..d67edd7 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor.conf @@ -0,0 +1,43 @@ +github.com/Azure/azure-sdk-for-go 088007b3b08cc02b27f2eadfdcd870958460ce7e +github.com/Azure/go-autorest ec5f4903f77ed9927ac95b19ab8e44ada64c1356 +github.com/sirupsen/logrus 3d4380f53a34dcdc95f0c1db702615992b38d9a4 +github.com/aws/aws-sdk-go c6fc52983ea2375810aa38ddb5370e9cdf611716 +github.com/bshuster-repo/logrus-logstash-hook d2c0ecc1836d91814e15e23bb5dc309c3ef51f4a +github.com/bugsnag/bugsnag-go b1d153021fcd90ca3f080db36bec96dc690fb274 +github.com/bugsnag/osext 0dd3f918b21bec95ace9dc86c7e70266cfc5c702 +github.com/bugsnag/panicwrap e2c28503fcd0675329da73bf48b33404db873782 +github.com/denverdino/aliyungo afedced274aa9a7fcdd47ac97018f0f8db4e5de2 +github.com/dgrijalva/jwt-go a601269ab70c205d26370c16f7c81e9017c14e04 +github.com/docker/goamz f0a21f5b2e12f83a505ecf79b633bb2035cf6f85 +github.com/docker/libtrust fa567046d9b14f6aa788882a950d69651d230b21 +github.com/garyburd/redigo 535138d7bcd717d6531c701ef5933d98b1866257 +github.com/go-ini/ini 2ba15ac2dc9cdf88c110ec2dc0ced7fa45f5678c +github.com/golang/protobuf 8d92cf5fc15a4382f8964b08e1f42a75c0591aa3 +github.com/gorilla/context 14f550f51af52180c2eefed15e5fd18d63c0a64a +github.com/gorilla/handlers 60c7bfde3e33c201519a200a4507a158cc03a17b +github.com/gorilla/mux 599cba5e7b6137d46ddf58fb1765f5d928e69604 +github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 +github.com/jmespath/go-jmespath bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d +github.com/miekg/dns 271c58e0c14f552178ea321a545ff9af38930f39 +github.com/mitchellh/mapstructure 482a9fd5fa83e8c4e7817413b80f3eb8feec03ef +github.com/ncw/swift b964f2ca856aac39885e258ad25aec08d5f64ee6 +github.com/spf13/cobra 312092086bed4968099259622145a0c9ae280064 +github.com/spf13/pflag 5644820622454e71517561946e3d94b9f9db6842 +github.com/stevvooe/resumable 2aaf90b2ceea5072cb503ef2a620b08ff3119870 +github.com/xenolf/lego a9d8cec0e6563575e5868a005359ac97911b5985 +github.com/yvasiyarov/go-metrics 57bccd1ccd43f94bb17fdd8bf3007059b802f85e +github.com/yvasiyarov/gorelic a9bba5b9ab508a086f9a12b8c51fab68478e2128 +github.com/yvasiyarov/newrelic_platform_go b21fdbd4370f3717f3bbd2bf41c223bc273068e6 +golang.org/x/crypto c10c31b5e94b6f7a0283272dc2bb27163dcea24b +golang.org/x/net 4876518f9e71663000c348837735820161a42df7 +golang.org/x/oauth2 045497edb6234273d67dbc25da3f2ddbc4c4cacf +golang.org/x/time a4bde12657593d5e90d0533a3e4fd95e635124cb +google.golang.org/api 9bf6e6e569ff057f75d9604a46c52928f17d2b54 +google.golang.org/appengine 12d5545dc1cfa6047a286d5e853841b6471f4c19 +google.golang.org/cloud 975617b05ea8a58727e6c1a06b6161ff4185a9f2 +google.golang.org/grpc d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994 +gopkg.in/check.v1 64131543e7896d5bcc6bd5a76287eb75ea96c673 +gopkg.in/square/go-jose.v1 40d457b439244b546f023d056628e5184136899b +gopkg.in/yaml.v2 bef53efd0c76e49e6de55ead051f886bea7e9420 +rsc.io/letsencrypt e770c10b0f1a64775ae91d240407ce00d1a5bdeb https://github.com/dmcgowan/letsencrypt.git +github.com/opencontainers/go-digest a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb diff --git a/vendor/github.com/docker/docker-credential-helpers/LICENSE b/vendor/github.com/docker/docker-credential-helpers/LICENSE new file mode 100644 index 0000000..1ea555e --- /dev/null +++ 
b/vendor/github.com/docker/docker-credential-helpers/LICENSE
@@ -0,0 +1,20 @@
+Copyright (c) 2016 David Calavera
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/docker/docker-credential-helpers/README.md b/vendor/github.com/docker/docker-credential-helpers/README.md
new file mode 100644
index 0000000..f9cbc3f
--- /dev/null
+++ b/vendor/github.com/docker/docker-credential-helpers/README.md
@@ -0,0 +1,82 @@
+## Introduction
+
+docker-credential-helpers is a suite of programs to use native stores to keep Docker credentials safe.
+
+## Installation
+
+Go to the [Releases](https://github.com/docker/docker-credential-helpers/releases) page and download the binary that works best for you. Put that binary in your `$PATH`, so Docker can find it.
+
+### Building from scratch
+
+The programs in this repository are written in the Go programming language. These instructions assume that you are familiar with the language and have it installed on your machine.
+
+1 - Download the source and put it in your `$GOPATH` with `go get`.
+
+```
+$ go get github.com/docker/docker-credential-helpers
+```
+
+2 - Use `make` to build the program you want. That will leave the resulting executable in the `bin` directory inside the repository.
+
+```
+$ cd $GOPATH/src/github.com/docker/docker-credential-helpers
+$ make osxkeychain
+```
+
+3 - Put that binary in your `$PATH`, so Docker can find it.
+
+## Usage
+
+### With the Docker Engine
+
+Set the `credsStore` option in your `.docker/config.json` file with the suffix of the program you want to use. For instance, set it to `osxkeychain` if you want to use `docker-credential-osxkeychain`.
+
+```json
+{
+  "credsStore": "osxkeychain"
+}
+```
+
+### With other command line applications
+
+The sub-package [client](https://godoc.org/github.com/docker/docker-credential-helpers/client) includes
+functions to call external programs from your own command line applications.
+
+There are three things you need to know if you need to interact with a helper:
+
+1. The name of the program to execute, for instance `docker-credential-osxkeychain`.
+2. The server address to identify the credentials, for instance `https://example.com`.
+3. The username and secret to store, when you want to store credentials.
+
+You can see examples of each function in the [client](https://godoc.org/github.com/docker/docker-credential-helpers/client) documentation.
+
+### Available programs
+
+1. osxkeychain: Provides a helper to use the OS X keychain as credentials store.
+2. secretservice: Provides a helper to use the D-Bus secret service as credentials store.
+3. wincred: Provides a helper to use the Windows credentials manager as store.
+4. pass: Provides a helper to use `pass` as credentials store.
+
+#### Note
+
+`pass` needs to be configured for `docker-credential-pass` to work properly.
+It must be initialized with a `gpg2` key ID. Make sure your GPG key exists in the `gpg2` keyring, as `pass` uses `gpg2` instead of the regular `gpg`.
+
+## Development
+
+A credential helper can be any program that can read values from the standard input. We use the first argument in the command line to differentiate the kind of command to execute. There are four valid values:
+
+- `store`: Adds credentials to the keychain. The payload in the standard input is a JSON document with `ServerURL`, `Username` and `Secret`.
+- `get`: Retrieves credentials from the keychain. The payload in the standard input is the raw value for the `ServerURL`.
+- `erase`: Removes credentials from the keychain. The payload in the standard input is the raw value for the `ServerURL`.
+- `list`: Lists stored credentials. There is no standard input payload.
+
+This repository also includes libraries to implement new credentials programs in Go. Adding a new helper program is pretty easy. You can see how the OS X keychain helper works in the [osxkeychain](osxkeychain) directory.
+
+1. Implement the interface `credentials.Helper` in `YOUR_PACKAGE/YOUR_PACKAGE_$GOOS.go`.
+2. Create a main program in `YOUR_PACKAGE/cmd/main_$GOOS.go`.
+3. Add make tasks to build your program and run tests.
+
+## License
+
+MIT. See [LICENSE](LICENSE) for more information.
diff --git a/vendor/github.com/docker/docker-credential-helpers/client/client.go b/vendor/github.com/docker/docker-credential-helpers/client/client.go
new file mode 100644
index 0000000..d1d0434
--- /dev/null
+++ b/vendor/github.com/docker/docker-credential-helpers/client/client.go
@@ -0,0 +1,121 @@
+package client
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"strings"
+
+	"github.com/docker/docker-credential-helpers/credentials"
+)
+
+// isValidCredsMessage checks if 'msg' is one of the standard invalid-credentials
+// error messages, returning the corresponding typed error if so and nil otherwise.
+// error values can be errCredentialsMissingServerURL or errCredentialsMissingUsername.
+func isValidCredsMessage(msg string) error {
+	if credentials.IsCredentialsMissingServerURLMessage(msg) {
+		return credentials.NewErrCredentialsMissingServerURL()
+	}
+
+	if credentials.IsCredentialsMissingUsernameMessage(msg) {
+		return credentials.NewErrCredentialsMissingUsername()
+	}
+
+	return nil
+}
+
+// Store uses an external program to save credentials.
+func Store(program ProgramFunc, creds *credentials.Credentials) error {
+	cmd := program("store")
+
+	buffer := new(bytes.Buffer)
+	if err := json.NewEncoder(buffer).Encode(creds); err != nil {
+		return err
+	}
+	cmd.Input(buffer)
+
+	out, err := cmd.Output()
+	if err != nil {
+		t := strings.TrimSpace(string(out))
+
+		if isValidErr := isValidCredsMessage(t); isValidErr != nil {
+			err = isValidErr
+		}
+
+		return fmt.Errorf("error storing credentials - err: %v, out: `%s`", err, t)
+	}
+
+	return nil
+}
+
+// Get executes an external program to get the credentials from a native store.
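+//
+// A minimal usage sketch (the helper name and server URL are illustrative):
+//
+//	p := NewShellProgramFunc("docker-credential-osxkeychain")
+//	creds, err := Get(p, "https://index.docker.io/v1/")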
+func Get(program ProgramFunc, serverURL string) (*credentials.Credentials, error) { + cmd := program("get") + cmd.Input(strings.NewReader(serverURL)) + + out, err := cmd.Output() + if err != nil { + t := strings.TrimSpace(string(out)) + + if credentials.IsErrCredentialsNotFoundMessage(t) { + return nil, credentials.NewErrCredentialsNotFound() + } + + if isValidErr := isValidCredsMessage(t); isValidErr != nil { + err = isValidErr + } + + return nil, fmt.Errorf("error getting credentials - err: %v, out: `%s`", err, t) + } + + resp := &credentials.Credentials{ + ServerURL: serverURL, + } + + if err := json.NewDecoder(bytes.NewReader(out)).Decode(resp); err != nil { + return nil, err + } + + return resp, nil +} + +// Erase executes a program to remove the server credentials from the native store. +func Erase(program ProgramFunc, serverURL string) error { + cmd := program("erase") + cmd.Input(strings.NewReader(serverURL)) + out, err := cmd.Output() + if err != nil { + t := strings.TrimSpace(string(out)) + + if isValidErr := isValidCredsMessage(t); isValidErr != nil { + err = isValidErr + } + + return fmt.Errorf("error erasing credentials - err: %v, out: `%s`", err, t) + } + + return nil +} + +// List executes a program to list server credentials in the native store. +func List(program ProgramFunc) (map[string]string, error) { + cmd := program("list") + cmd.Input(strings.NewReader("unused")) + out, err := cmd.Output() + if err != nil { + t := strings.TrimSpace(string(out)) + + if isValidErr := isValidCredsMessage(t); isValidErr != nil { + err = isValidErr + } + + return nil, fmt.Errorf("error listing credentials - err: %v, out: `%s`", err, t) + } + + var resp map[string]string + if err = json.NewDecoder(bytes.NewReader(out)).Decode(&resp); err != nil { + return nil, err + } + + return resp, nil +} diff --git a/vendor/github.com/docker/docker-credential-helpers/client/command.go b/vendor/github.com/docker/docker-credential-helpers/client/command.go new file mode 100644 index 0000000..8da3343 --- /dev/null +++ b/vendor/github.com/docker/docker-credential-helpers/client/command.go @@ -0,0 +1,56 @@ +package client + +import ( + "fmt" + "io" + "os" + "os/exec" +) + +// Program is an interface to execute external programs. +type Program interface { + Output() ([]byte, error) + Input(in io.Reader) +} + +// ProgramFunc is a type of function that initializes programs based on arguments. +type ProgramFunc func(args ...string) Program + +// NewShellProgramFunc creates programs that are executed in a Shell. +func NewShellProgramFunc(name string) ProgramFunc { + return NewShellProgramFuncWithEnv(name, nil) +} + +// NewShellProgramFuncWithEnv creates programs that are executed in a Shell with environment variables +func NewShellProgramFuncWithEnv(name string, env *map[string]string) ProgramFunc { + return func(args ...string) Program { + return &Shell{cmd: createProgramCmdRedirectErr(name, args, env)} + } +} + +func createProgramCmdRedirectErr(commandName string, args []string, env *map[string]string) *exec.Cmd { + programCmd := exec.Command(commandName, args...) + programCmd.Env = os.Environ() + if env != nil { + for k, v := range *env { + programCmd.Env = append(programCmd.Env, fmt.Sprintf("%s=%s", k, v)) + } + } + programCmd.Stderr = os.Stderr + return programCmd +} + +// Shell invokes shell commands to talk with a remote credentials helper. +type Shell struct { + cmd *exec.Cmd +} + +// Output returns responses from the remote credentials helper. 
+func (s *Shell) Output() ([]byte, error) {
+	return s.cmd.Output()
+}
+
+// Input sets the input to send to a remote credentials helper.
+func (s *Shell) Input(in io.Reader) {
+	s.cmd.Stdin = in
+}
diff --git a/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go b/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go
new file mode 100644
index 0000000..da8b594
--- /dev/null
+++ b/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go
@@ -0,0 +1,186 @@
+package credentials
+
+import (
+	"bufio"
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"os"
+	"strings"
+)
+
+// Credentials holds the information shared between docker and the credentials store.
+type Credentials struct {
+	ServerURL string
+	Username  string
+	Secret    string
+}
+
+// isValid checks the integrity of Credentials object such that no credentials lack
+// a server URL or a username.
+// It returns whether the credentials are valid and, if not, the corresponding error.
+// error values can be errCredentialsMissingServerURL or errCredentialsMissingUsername.
+func (c *Credentials) isValid() (bool, error) {
+	if len(c.ServerURL) == 0 {
+		return false, NewErrCredentialsMissingServerURL()
+	}
+
+	if len(c.Username) == 0 {
+		return false, NewErrCredentialsMissingUsername()
+	}
+
+	return true, nil
+}
+
+// CredsLabel holds the way Docker credentials should be labeled in credentials
+// stores that allow labelling. That label makes it possible to filter out
+// non-Docker credentials at lookup/search time in the macOS keychain, the
+// Windows credentials manager and Linux libsecret. Default value is "Docker Credentials".
+var CredsLabel = "Docker Credentials"
+
+// SetCredsLabel is a simple setter for CredsLabel
+func SetCredsLabel(label string) {
+	CredsLabel = label
+}
+
+// Serve initializes the credentials helper and parses the action argument.
+// This function is designed to be called from a command line interface.
+// It uses os.Args[1] as the key for the action.
+// It uses os.Stdin as input and os.Stdout as output.
+// This function terminates the program with os.Exit(1) if there is an error.
+func Serve(helper Helper) {
+	var err error
+	if len(os.Args) != 2 {
+		err = fmt.Errorf("Usage: %s <store|get|erase|list|version>", os.Args[0])
+	}
+
+	if err == nil {
+		err = HandleCommand(helper, os.Args[1], os.Stdin, os.Stdout)
+	}
+
+	if err != nil {
+		fmt.Fprintf(os.Stdout, "%v\n", err)
+		os.Exit(1)
+	}
+}
+
+// HandleCommand uses a helper and a key to run a credential action.
+func HandleCommand(helper Helper, key string, in io.Reader, out io.Writer) error {
+	switch key {
+	case "store":
+		return Store(helper, in)
+	case "get":
+		return Get(helper, in, out)
+	case "erase":
+		return Erase(helper, in)
+	case "list":
+		return List(helper, out)
+	case "version":
+		return PrintVersion(out)
+	}
+	return fmt.Errorf("Unknown credential action `%s`", key)
+}
+
+// Store uses a helper and an input reader to save credentials.
+// The reader must contain the JSON serialization of a Credentials struct.
+func Store(helper Helper, reader io.Reader) error {
+	scanner := bufio.NewScanner(reader)
+
+	buffer := new(bytes.Buffer)
+	for scanner.Scan() {
+		buffer.Write(scanner.Bytes())
+	}
+
+	if err := scanner.Err(); err != nil && err != io.EOF {
+		return err
+	}
+
+	var creds Credentials
+	if err := json.NewDecoder(buffer).Decode(&creds); err != nil {
+		return err
+	}
+
+	if ok, err := creds.isValid(); !ok {
+		return err
+	}
+
+	return helper.Add(&creds)
+}
+
+// Get retrieves the credentials for a given server URL.
+// The reader must contain the server URL to search.
+// The writer is used to write the JSON serialization of the credentials.
+func Get(helper Helper, reader io.Reader, writer io.Writer) error {
+	scanner := bufio.NewScanner(reader)
+
+	buffer := new(bytes.Buffer)
+	for scanner.Scan() {
+		buffer.Write(scanner.Bytes())
+	}
+
+	if err := scanner.Err(); err != nil && err != io.EOF {
+		return err
+	}
+
+	serverURL := strings.TrimSpace(buffer.String())
+	if len(serverURL) == 0 {
+		return NewErrCredentialsMissingServerURL()
+	}
+
+	username, secret, err := helper.Get(serverURL)
+	if err != nil {
+		return err
+	}
+
+	resp := Credentials{
+		ServerURL: serverURL,
+		Username:  username,
+		Secret:    secret,
+	}
+
+	buffer.Reset()
+	if err := json.NewEncoder(buffer).Encode(resp); err != nil {
+		return err
+	}
+
+	fmt.Fprint(writer, buffer.String())
+	return nil
+}
+
+// Erase removes credentials from the store.
+// The reader must contain the server URL to remove.
+func Erase(helper Helper, reader io.Reader) error {
+	scanner := bufio.NewScanner(reader)
+
+	buffer := new(bytes.Buffer)
+	for scanner.Scan() {
+		buffer.Write(scanner.Bytes())
+	}
+
+	if err := scanner.Err(); err != nil && err != io.EOF {
+		return err
+	}
+
+	serverURL := strings.TrimSpace(buffer.String())
+	if len(serverURL) == 0 {
+		return NewErrCredentialsMissingServerURL()
+	}
+
+	return helper.Delete(serverURL)
+}
+
+// List returns all the serverURLs of keys in
+// the OS store as a list of strings.
+func List(helper Helper, writer io.Writer) error {
+	accts, err := helper.List()
+	if err != nil {
+		return err
+	}
+	return json.NewEncoder(writer).Encode(accts)
+}
+
+// PrintVersion outputs the current version.
+func PrintVersion(writer io.Writer) error {
+	fmt.Fprintln(writer, Version)
+	return nil
+}
diff --git a/vendor/github.com/docker/docker-credential-helpers/credentials/error.go b/vendor/github.com/docker/docker-credential-helpers/credentials/error.go
new file mode 100644
index 0000000..fe6a5ae
--- /dev/null
+++ b/vendor/github.com/docker/docker-credential-helpers/credentials/error.go
@@ -0,0 +1,102 @@
+package credentials
+
+const (
+	// ErrCredentialsNotFound standardizes the not found error, so every helper returns
+	// the same message and docker can handle it properly.
+	errCredentialsNotFoundMessage = "credentials not found in native keychain"
+
+	// ErrCredentialsMissingServerURL and ErrCredentialsMissingUsername standardize
+	// invalid credentials or credentials management operations
+	errCredentialsMissingServerURLMessage = "no credentials server URL"
+	errCredentialsMissingUsernameMessage  = "no credentials username"
+)
+
+// errCredentialsNotFound represents an error
+// raised when credentials are not in the store.
+type errCredentialsNotFound struct{}
+
+// Error returns the standard error message
+// for when the credentials are not in the store.
+func (errCredentialsNotFound) Error() string {
+	return errCredentialsNotFoundMessage
+}
+
+// NewErrCredentialsNotFound creates a new error
+// for when the credentials are not in the store.
+func NewErrCredentialsNotFound() error {
+	return errCredentialsNotFound{}
+}
+
+// IsErrCredentialsNotFound returns true if the error
+// was caused by not having a set of credentials in a store.
+func IsErrCredentialsNotFound(err error) bool {
+	_, ok := err.(errCredentialsNotFound)
+	return ok
+}
+
+// IsErrCredentialsNotFoundMessage returns true if the error
+// was caused by not having a set of credentials in a store.
+// IsErrCredentialsNotFoundMessage returns true if the error
+// was caused by not having a set of credentials in a store.
+//
+// This function helps to check messages returned by an
+// external program via its standard output.
+func IsErrCredentialsNotFoundMessage(err string) bool {
+	return err == errCredentialsNotFoundMessage
+}
+
+// errCredentialsMissingServerURL represents an error raised
+// when the credentials object has no server URL or when no
+// server URL is provided to a credentials operation requiring
+// one.
+type errCredentialsMissingServerURL struct{}
+
+func (errCredentialsMissingServerURL) Error() string {
+	return errCredentialsMissingServerURLMessage
+}
+
+// errCredentialsMissingUsername represents an error raised
+// when the credentials object has no username or when no
+// username is provided to a credentials operation requiring
+// one.
+type errCredentialsMissingUsername struct{}
+
+func (errCredentialsMissingUsername) Error() string {
+	return errCredentialsMissingUsernameMessage
+}
+
+// NewErrCredentialsMissingServerURL creates a new error for
+// errCredentialsMissingServerURL.
+func NewErrCredentialsMissingServerURL() error {
+	return errCredentialsMissingServerURL{}
+}
+
+// NewErrCredentialsMissingUsername creates a new error for
+// errCredentialsMissingUsername.
+func NewErrCredentialsMissingUsername() error {
+	return errCredentialsMissingUsername{}
+}
+
+// IsCredentialsMissingServerURL returns true if the error
+// was an errCredentialsMissingServerURL.
+func IsCredentialsMissingServerURL(err error) bool {
+	_, ok := err.(errCredentialsMissingServerURL)
+	return ok
+}
+
+// IsCredentialsMissingServerURLMessage checks for an
+// errCredentialsMissingServerURL in the error message.
+func IsCredentialsMissingServerURLMessage(err string) bool {
+	return err == errCredentialsMissingServerURLMessage
+}
+
+// IsCredentialsMissingUsername returns true if the error
+// was an errCredentialsMissingUsername.
+func IsCredentialsMissingUsername(err error) bool {
+	_, ok := err.(errCredentialsMissingUsername)
+	return ok
+}
+
+// IsCredentialsMissingUsernameMessage checks for an
+// errCredentialsMissingUsername in the error message.
+func IsCredentialsMissingUsernameMessage(err string) bool {
+	return err == errCredentialsMissingUsernameMessage
+}
diff --git a/vendor/github.com/docker/docker-credential-helpers/credentials/helper.go b/vendor/github.com/docker/docker-credential-helpers/credentials/helper.go
new file mode 100644
index 0000000..135acd2
--- /dev/null
+++ b/vendor/github.com/docker/docker-credential-helpers/credentials/helper.go
@@ -0,0 +1,14 @@
+package credentials
+
+// Helper is the interface a credentials store helper must implement.
+type Helper interface {
+	// Add appends credentials to the store.
+	Add(*Credentials) error
+	// Delete removes credentials from the store.
+	Delete(serverURL string) error
+	// Get retrieves credentials from the store.
+	// It returns username and secret as strings.
+	Get(serverURL string) (string, string, error)
+	// List returns the stored serverURLs and their associated usernames.
+	List() (map[string]string, error)
+}
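These four methods are the whole contract; the vendored helpers below back them with the macOS keychain and Linux libsecret. As an illustrative sketch (not part of this diff), an in-memory `Helper` suitable for tests could look like this:

```go
package credutil

import (
	"sync"

	"github.com/docker/docker-credential-helpers/credentials"
)

// memoryHelper is a hypothetical in-memory Helper, keyed by ServerURL.
type memoryHelper struct {
	mu    sync.Mutex
	creds map[string]*credentials.Credentials
}

func (m *memoryHelper) Add(c *credentials.Credentials) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	if m.creds == nil {
		m.creds = make(map[string]*credentials.Credentials)
	}
	m.creds[c.ServerURL] = c
	return nil
}

func (m *memoryHelper) Delete(serverURL string) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	delete(m.creds, serverURL)
	return nil
}

func (m *memoryHelper) Get(serverURL string) (string, string, error) {
	m.mu.Lock()
	defer m.mu.Unlock()
	if c, ok := m.creds[serverURL]; ok {
		return c.Username, c.Secret, nil
	}
	// Return the standardized typed error so callers can detect a miss.
	return "", "", credentials.NewErrCredentialsNotFound()
}

func (m *memoryHelper) List() (map[string]string, error) {
	m.mu.Lock()
	defer m.mu.Unlock()
	out := make(map[string]string, len(m.creds))
	for url, c := range m.creds {
		out[url] = c.Username
	}
	return out, nil
}
```

Note that an in-memory store only makes sense in-process (e.g. unit tests); a real helper is invoked once per command and would lose its state between invocations, which is why the production helpers persist to an OS keystore.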
diff --git a/vendor/github.com/docker/docker-credential-helpers/credentials/version.go b/vendor/github.com/docker/docker-credential-helpers/credentials/version.go
new file mode 100644
index 0000000..033a5fe
--- /dev/null
+++ b/vendor/github.com/docker/docker-credential-helpers/credentials/version.go
@@ -0,0 +1,4 @@
+package credentials
+
+// Version holds a string describing the current version
+const Version = "0.6.0"
diff --git a/vendor/github.com/docker/docker-credential-helpers/osxkeychain/osxkeychain_darwin.c b/vendor/github.com/docker/docker-credential-helpers/osxkeychain/osxkeychain_darwin.c
new file mode 100644
index 0000000..f84d61e
--- /dev/null
+++ b/vendor/github.com/docker/docker-credential-helpers/osxkeychain/osxkeychain_darwin.c
@@ -0,0 +1,228 @@
+#include "osxkeychain_darwin.h"
+#include <CoreFoundation/CoreFoundation.h>
+#include <Foundation/NSValue.h>
+#include <stdio.h>
+#include <string.h>
+
+char *get_error(OSStatus status) {
+  char *buf = malloc(128);
+  CFStringRef str = SecCopyErrorMessageString(status, NULL);
+  int success = CFStringGetCString(str, buf, 128, kCFStringEncodingUTF8);
+  if (!success) {
+    strncpy(buf, "Unknown error", 128);
+  }
+  return buf;
+}
+
+char *keychain_add(struct Server *server, char *label, char *username, char *secret) {
+  SecKeychainItemRef item;
+
+  OSStatus status = SecKeychainAddInternetPassword(
+    NULL,
+    strlen(server->host), server->host,
+    0, NULL,
+    strlen(username), username,
+    strlen(server->path), server->path,
+    server->port,
+    server->proto,
+    kSecAuthenticationTypeDefault,
+    strlen(secret), secret,
+    &item
+  );
+
+  if (status) {
+    return get_error(status);
+  }
+
+  SecKeychainAttribute attribute;
+  SecKeychainAttributeList attrs;
+  attribute.tag = kSecLabelItemAttr;
+  attribute.data = label;
+  attribute.length = strlen(label);
+  attrs.count = 1;
+  attrs.attr = &attribute;
+
+  status = SecKeychainItemModifyContent(item, &attrs, 0, NULL);
+
+  if (status) {
+    return get_error(status);
+  }
+
+  return NULL;
+}
+
+char *keychain_get(struct Server *server, unsigned int *username_l, char **username, unsigned int *secret_l, char **secret) {
+  char *tmp;
+  SecKeychainItemRef item;
+
+  OSStatus status = SecKeychainFindInternetPassword(
+    NULL,
+    strlen(server->host), server->host,
+    0, NULL,
+    0, NULL,
+    strlen(server->path), server->path,
+    server->port,
+    server->proto,
+    kSecAuthenticationTypeDefault,
+    secret_l, (void **)&tmp,
+    &item);
+
+  if (status) {
+    return get_error(status);
+  }
+
+  *secret = strdup(tmp);
+  SecKeychainItemFreeContent(NULL, tmp);
+
+  SecKeychainAttributeList list;
+  SecKeychainAttribute attr;
+
+  list.count = 1;
+  list.attr = &attr;
+  attr.tag = kSecAccountItemAttr;
+
+  status = SecKeychainItemCopyContent(item, NULL, &list, NULL, NULL);
+  if (status) {
+    return get_error(status);
+  }
+
+  *username = strdup(attr.data);
+  *username_l = attr.length;
+  SecKeychainItemFreeContent(&list, NULL);
+
+  return NULL;
+}
+
+char *keychain_delete(struct Server *server) {
+  SecKeychainItemRef item;
+
+  OSStatus status = SecKeychainFindInternetPassword(
+    NULL,
+    strlen(server->host), server->host,
+    0, NULL,
+    0, NULL,
+    strlen(server->path), server->path,
+    server->port,
+    server->proto,
+    kSecAuthenticationTypeDefault,
+    0, NULL,
+    &item);
+
+  if (status) {
+    return get_error(status);
+  }
+
+  status = SecKeychainItemDelete(item);
+  if (status) {
+    return get_error(status);
+  }
+  return NULL;
+}
+
+char * CFStringToCharArr(CFStringRef aString) {
+  if (aString == NULL) {
+    return NULL;
+  }
+  CFIndex length = CFStringGetLength(aString);
+  CFIndex maxSize =
CFStringGetMaximumSizeForEncoding(length, kCFStringEncodingUTF8) + 1; + char *buffer = (char *)malloc(maxSize); + if (CFStringGetCString(aString, buffer, maxSize, + kCFStringEncodingUTF8)) { + return buffer; + } + return NULL; +} + +char *keychain_list(char *credsLabel, char *** paths, char *** accts, unsigned int *list_l) { + CFStringRef credsLabelCF = CFStringCreateWithCString(NULL, credsLabel, kCFStringEncodingUTF8); + CFMutableDictionaryRef query = CFDictionaryCreateMutable (NULL, 1, NULL, NULL); + CFDictionaryAddValue(query, kSecClass, kSecClassInternetPassword); + CFDictionaryAddValue(query, kSecReturnAttributes, kCFBooleanTrue); + CFDictionaryAddValue(query, kSecMatchLimit, kSecMatchLimitAll); + CFDictionaryAddValue(query, kSecAttrLabel, credsLabelCF); + //Use this query dictionary + CFTypeRef result= NULL; + OSStatus status = SecItemCopyMatching( + query, + &result); + + CFRelease(credsLabelCF); + + //Ran a search and store the results in result + if (status) { + return get_error(status); + } + CFIndex numKeys = CFArrayGetCount(result); + *paths = (char **) malloc((int)sizeof(char *)*numKeys); + *accts = (char **) malloc((int)sizeof(char *)*numKeys); + //result is of type CFArray + for(CFIndex i=0; i +*/ +import "C" +import ( + "errors" + "net/url" + "strconv" + "strings" + "unsafe" + + "github.com/docker/docker-credential-helpers/credentials" +) + +// errCredentialsNotFound is the specific error message returned by OS X +// when the credentials are not in the keychain. +const errCredentialsNotFound = "The specified item could not be found in the keychain." + +// Osxkeychain handles secrets using the OS X Keychain as store. +type Osxkeychain struct{} + +// Add adds new credentials to the keychain. +func (h Osxkeychain) Add(creds *credentials.Credentials) error { + h.Delete(creds.ServerURL) + + s, err := splitServer(creds.ServerURL) + if err != nil { + return err + } + defer freeServer(s) + + label := C.CString(credentials.CredsLabel) + defer C.free(unsafe.Pointer(label)) + username := C.CString(creds.Username) + defer C.free(unsafe.Pointer(username)) + secret := C.CString(creds.Secret) + defer C.free(unsafe.Pointer(secret)) + + errMsg := C.keychain_add(s, label, username, secret) + if errMsg != nil { + defer C.free(unsafe.Pointer(errMsg)) + return errors.New(C.GoString(errMsg)) + } + + return nil +} + +// Delete removes credentials from the keychain. +func (h Osxkeychain) Delete(serverURL string) error { + s, err := splitServer(serverURL) + if err != nil { + return err + } + defer freeServer(s) + + errMsg := C.keychain_delete(s) + if errMsg != nil { + defer C.free(unsafe.Pointer(errMsg)) + return errors.New(C.GoString(errMsg)) + } + + return nil +} + +// Get returns the username and secret to use for a given registry server URL. 
+func (h Osxkeychain) Get(serverURL string) (string, string, error) {
+	s, err := splitServer(serverURL)
+	if err != nil {
+		return "", "", err
+	}
+	defer freeServer(s)
+
+	var usernameLen C.uint
+	var username *C.char
+	var secretLen C.uint
+	var secret *C.char
+	defer C.free(unsafe.Pointer(username))
+	defer C.free(unsafe.Pointer(secret))
+
+	errMsg := C.keychain_get(s, &usernameLen, &username, &secretLen, &secret)
+	if errMsg != nil {
+		defer C.free(unsafe.Pointer(errMsg))
+		goMsg := C.GoString(errMsg)
+		if goMsg == errCredentialsNotFound {
+			return "", "", credentials.NewErrCredentialsNotFound()
+		}
+
+		return "", "", errors.New(goMsg)
+	}
+
+	user := C.GoStringN(username, C.int(usernameLen))
+	pass := C.GoStringN(secret, C.int(secretLen))
+	return user, pass, nil
+}
+
+// List returns the stored URLs and corresponding usernames.
+func (h Osxkeychain) List() (map[string]string, error) {
+	credsLabelC := C.CString(credentials.CredsLabel)
+	defer C.free(unsafe.Pointer(credsLabelC))
+
+	var pathsC **C.char
+	defer C.free(unsafe.Pointer(pathsC))
+	var acctsC **C.char
+	defer C.free(unsafe.Pointer(acctsC))
+	var listLenC C.uint
+	errMsg := C.keychain_list(credsLabelC, &pathsC, &acctsC, &listLenC)
+	if errMsg != nil {
+		defer C.free(unsafe.Pointer(errMsg))
+		goMsg := C.GoString(errMsg)
+		return nil, errors.New(goMsg)
+	}
+
+	defer C.freeListData(&pathsC, listLenC)
+	defer C.freeListData(&acctsC, listLenC)
+
+	listLen := int(listLenC)
+	pathTmp := (*[1 << 30]*C.char)(unsafe.Pointer(pathsC))[:listLen:listLen]
+	acctTmp := (*[1 << 30]*C.char)(unsafe.Pointer(acctsC))[:listLen:listLen]
+	// Take the array of C strings into Go, skipping entries irrelevant to
+	// the credentials helper.
+	resp := make(map[string]string)
+	for i := 0; i < listLen; i++ {
+		if C.GoString(pathTmp[i]) == "0" {
+			continue
+		}
+		resp[C.GoString(pathTmp[i])] = C.GoString(acctTmp[i])
+	}
+	return resp, nil
+}
+
+func splitServer(serverURL string) (*C.struct_Server, error) {
+	u, err := parseURL(serverURL)
+	if err != nil {
+		return nil, err
+	}
+
+	proto := C.kSecProtocolTypeHTTPS
+	if u.Scheme == "http" {
+		proto = C.kSecProtocolTypeHTTP
+	}
+	var port int
+	p := getPort(u)
+	if p != "" {
+		port, err = strconv.Atoi(p)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return &C.struct_Server{
+		proto: C.SecProtocolType(proto),
+		host:  C.CString(getHostname(u)),
+		port:  C.uint(port),
+		path:  C.CString(u.Path),
+	}, nil
+}
+
+func freeServer(s *C.struct_Server) {
+	C.free(unsafe.Pointer(s.host))
+	C.free(unsafe.Pointer(s.path))
+}
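Both `List` implementations convert a C `char**` into a Go slice by casting the pointer to a large fixed-size Go array type and re-slicing it with an explicit length and capacity; the secretservice variant further below caps the array at `(1 << 29) - 1` elements so the array type stays addressable on 32-bit platforms. A self-contained sketch of the same pattern (names are hypothetical, not part of this diff):

```go
package main

/*
#include <stdlib.h>
*/
import "C"

import (
	"fmt"
	"unsafe"
)

// goStrings mirrors the conversion used by List: reinterpret a C array
// of n char* values as a pointer to a large Go array, then slice it
// with explicit len/cap so Go never indexes past the C allocation.
func goStrings(argv **C.char, n int) []string {
	tmp := (*[(1 << 29) - 1]*C.char)(unsafe.Pointer(argv))[:n:n]
	out := make([]string, n)
	for i, s := range tmp {
		out[i] = C.GoString(s)
	}
	return out
}

func main() {
	words := []string{"server", "username"}
	// Build a char*[2] on the C heap to demonstrate the round trip.
	arr := C.malloc(C.size_t(len(words)) * C.size_t(unsafe.Sizeof(uintptr(0))))
	defer C.free(arr)
	slice := (*[(1 << 29) - 1]*C.char)(arr)[:len(words):len(words)]
	for i, w := range words {
		slice[i] = C.CString(w)
		defer C.free(unsafe.Pointer(slice[i]))
	}
	fmt.Println(goStrings((**C.char)(arr), len(words)))
}
```

The explicit `[:n:n]` length and capacity also keep any later `append` from writing into memory that Go does not own.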
+// parseURL parses and validates a given serverURL to an url.URL, and
+// returns an error if validation failed. Querystring parameters are
+// omitted in the resulting URL, because they are not used in the helper.
+//
+// If serverURL does not have a valid scheme, `//` is used as scheme
+// before parsing. This prevents the hostname being used as path,
+// and the credentials being stored without host.
+func parseURL(serverURL string) (*url.URL, error) {
+	// Check if serverURL has a scheme, otherwise add `//` as scheme.
+	if !strings.Contains(serverURL, "://") && !strings.HasPrefix(serverURL, "//") {
+		serverURL = "//" + serverURL
+	}
+
+	u, err := url.Parse(serverURL)
+	if err != nil {
+		return nil, err
+	}
+
+	if u.Scheme != "" && u.Scheme != "https" && u.Scheme != "http" {
+		return nil, errors.New("unsupported scheme: " + u.Scheme)
+	}
+	if getHostname(u) == "" {
+		return nil, errors.New("no hostname in URL")
+	}
+
+	u.RawQuery = ""
+	return u, nil
+}
diff --git a/vendor/github.com/docker/docker-credential-helpers/osxkeychain/osxkeychain_darwin.h b/vendor/github.com/docker/docker-credential-helpers/osxkeychain/osxkeychain_darwin.h
new file mode 100644
index 0000000..c54e7d7
--- /dev/null
+++ b/vendor/github.com/docker/docker-credential-helpers/osxkeychain/osxkeychain_darwin.h
@@ -0,0 +1,14 @@
+#include <Security/Security.h>
+
+struct Server {
+  SecProtocolType proto;
+  char *host;
+  char *path;
+  unsigned int port;
+};
+
+char *keychain_add(struct Server *server, char *label, char *username, char *secret);
+char *keychain_get(struct Server *server, unsigned int *username_l, char **username, unsigned int *secret_l, char **secret);
+char *keychain_delete(struct Server *server);
+char *keychain_list(char *credsLabel, char *** data, char *** accts, unsigned int *list_l);
+void freeListData(char *** data, unsigned int length);
\ No newline at end of file
diff --git a/vendor/github.com/docker/docker-credential-helpers/osxkeychain/url_go18.go b/vendor/github.com/docker/docker-credential-helpers/osxkeychain/url_go18.go
new file mode 100644
index 0000000..0b7297d
--- /dev/null
+++ b/vendor/github.com/docker/docker-credential-helpers/osxkeychain/url_go18.go
@@ -0,0 +1,13 @@
+//+build go1.8
+
+package osxkeychain
+
+import "net/url"
+
+func getHostname(u *url.URL) string {
+	return u.Hostname()
+}
+
+func getPort(u *url.URL) string {
+	return u.Port()
+}
diff --git a/vendor/github.com/docker/docker-credential-helpers/osxkeychain/url_non_go18.go b/vendor/github.com/docker/docker-credential-helpers/osxkeychain/url_non_go18.go
new file mode 100644
index 0000000..bdf9b7b
--- /dev/null
+++ b/vendor/github.com/docker/docker-credential-helpers/osxkeychain/url_non_go18.go
@@ -0,0 +1,41 @@
+//+build !go1.8
+
+package osxkeychain
+
+import (
+	"net/url"
+	"strings"
+)
+
+func getHostname(u *url.URL) string {
+	return stripPort(u.Host)
+}
+
+func getPort(u *url.URL) string {
+	return portOnly(u.Host)
+}
+
+func stripPort(hostport string) string {
+	colon := strings.IndexByte(hostport, ':')
+	if colon == -1 {
+		return hostport
+	}
+	if i := strings.IndexByte(hostport, ']'); i != -1 {
+		return strings.TrimPrefix(hostport[:i], "[")
+	}
+	return hostport[:colon]
+}
+
+func portOnly(hostport string) string {
+	colon := strings.IndexByte(hostport, ':')
+	if colon == -1 {
+		return ""
+	}
+	if i := strings.Index(hostport, "]:"); i != -1 {
+		return hostport[i+len("]:"):]
+	}
+	if strings.Contains(hostport, "]") {
+		return ""
+	}
+	return hostport[colon+len(":"):]
+}
diff --git a/vendor/github.com/docker/docker-credential-helpers/secretservice/secretservice_linux.c b/vendor/github.com/docker/docker-credential-helpers/secretservice/secretservice_linux.c
new file mode 100644
index 0000000..35dea92
--- /dev/null
+++ b/vendor/github.com/docker/docker-credential-helpers/secretservice/secretservice_linux.c
@@ -0,0 +1,162 @@
+#include <string.h>
+#include <stdlib.h>
+#include "secretservice_linux.h"
+
+const SecretSchema *docker_get_schema(void)
+{
+	static const SecretSchema docker_schema = {
+		"io.docker.Credentials", SECRET_SCHEMA_NONE,
+		{
+			{ "label",
SECRET_SCHEMA_ATTRIBUTE_STRING }, + { "server", SECRET_SCHEMA_ATTRIBUTE_STRING }, + { "username", SECRET_SCHEMA_ATTRIBUTE_STRING }, + { "docker_cli", SECRET_SCHEMA_ATTRIBUTE_STRING }, + { "NULL", 0 }, + } + }; + return &docker_schema; +} + +GError *add(char *label, char *server, char *username, char *secret) { + GError *err = NULL; + + secret_password_store_sync (DOCKER_SCHEMA, SECRET_COLLECTION_DEFAULT, + server, secret, NULL, &err, + "label", label, + "server", server, + "username", username, + "docker_cli", "1", + NULL); + return err; +} + +GError *delete(char *server) { + GError *err = NULL; + + secret_password_clear_sync(DOCKER_SCHEMA, NULL, &err, + "server", server, + "docker_cli", "1", + NULL); + if (err != NULL) + return err; + return NULL; +} + +char *get_attribute(const char *attribute, SecretItem *item) { + GHashTable *attributes; + GHashTableIter iter; + gchar *value, *key; + + attributes = secret_item_get_attributes(item); + g_hash_table_iter_init(&iter, attributes); + while (g_hash_table_iter_next(&iter, (void **)&key, (void **)&value)) { + if (strncmp(key, attribute, strlen(key)) == 0) + return (char *)value; + } + g_hash_table_unref(attributes); + return NULL; +} + +GError *get(char *server, char **username, char **secret) { + GError *err = NULL; + GHashTable *attributes; + SecretService *service; + GList *items, *l; + SecretSearchFlags flags = SECRET_SEARCH_LOAD_SECRETS | SECRET_SEARCH_ALL | SECRET_SEARCH_UNLOCK; + SecretValue *secretValue; + gsize length; + gchar *value; + + attributes = g_hash_table_new_full(g_str_hash, g_str_equal, g_free, g_free); + g_hash_table_insert(attributes, g_strdup("server"), g_strdup(server)); + g_hash_table_insert(attributes, g_strdup("docker_cli"), g_strdup("1")); + + service = secret_service_get_sync(SECRET_SERVICE_NONE, NULL, &err); + if (err == NULL) { + items = secret_service_search_sync(service, DOCKER_SCHEMA, attributes, flags, NULL, &err); + if (err == NULL) { + for (l = items; l != NULL; l = g_list_next(l)) { + value = secret_item_get_schema_name(l->data); + if (strncmp(value, "io.docker.Credentials", strlen(value)) != 0) { + g_free(value); + continue; + } + g_free(value); + secretValue = secret_item_get_secret(l->data); + if (secret != NULL) { + *secret = strdup(secret_value_get(secretValue, &length)); + secret_value_unref(secretValue); + } + *username = get_attribute("username", l->data); + } + g_list_free_full(items, g_object_unref); + } + g_object_unref(service); + } + g_hash_table_unref(attributes); + if (err != NULL) { + return err; + } + return NULL; +} + +GError *list(char *ref_label, char *** paths, char *** accts, unsigned int *list_l) { + GList *items; + GError *err = NULL; + SecretService *service; + SecretSearchFlags flags = SECRET_SEARCH_LOAD_SECRETS | SECRET_SEARCH_ALL | SECRET_SEARCH_UNLOCK; + GHashTable *attributes = g_hash_table_new_full(g_str_hash, g_str_equal, g_free, g_free); + + // List credentials with the right label only + g_hash_table_insert(attributes, g_strdup("label"), g_strdup(ref_label)); + + service = secret_service_get_sync(SECRET_SERVICE_NONE, NULL, &err); + if (err != NULL) { + return err; + } + + items = secret_service_search_sync(service, NULL, attributes, flags, NULL, &err); + int numKeys = g_list_length(items); + if (err != NULL) { + return err; + } + + char **tmp_paths = (char **) calloc(1,(int)sizeof(char *)*numKeys); + char **tmp_accts = (char **) calloc(1,(int)sizeof(char *)*numKeys); + + // items now contains our keys from the gnome keyring + // we will now put it in our two lists to return 
it to go + GList *current; + int listNumber = 0; + for(current = items; current!=NULL; current = current->next) { + char *pathTmp = secret_item_get_label(current->data); + // you cannot have a key without a label in the gnome keyring + char *acctTmp = get_attribute("username",current->data); + if (acctTmp==NULL) { + acctTmp = "account not defined"; + } + + tmp_paths[listNumber] = (char *) calloc(1, sizeof(char)*(strlen(pathTmp)+1)); + tmp_accts[listNumber] = (char *) calloc(1, sizeof(char)*(strlen(acctTmp)+1)); + + memcpy(tmp_paths[listNumber], pathTmp, sizeof(char)*(strlen(pathTmp)+1)); + memcpy(tmp_accts[listNumber], acctTmp, sizeof(char)*(strlen(acctTmp)+1)); + + listNumber = listNumber + 1; + } + + *paths = (char **) realloc(tmp_paths, (int)sizeof(char *)*listNumber); + *accts = (char **) realloc(tmp_accts, (int)sizeof(char *)*listNumber); + + *list_l = listNumber; + + return NULL; +} + +void freeListData(char *** data, unsigned int length) { + int i; + for(i=0; i +*/ +import "C" +import ( + "errors" + "unsafe" + + "github.com/docker/docker-credential-helpers/credentials" +) + +// Secretservice handles secrets using Linux secret-service as a store. +type Secretservice struct{} + +// Add adds new credentials to the keychain. +func (h Secretservice) Add(creds *credentials.Credentials) error { + if creds == nil { + return errors.New("missing credentials") + } + credsLabel := C.CString(credentials.CredsLabel) + defer C.free(unsafe.Pointer(credsLabel)) + server := C.CString(creds.ServerURL) + defer C.free(unsafe.Pointer(server)) + username := C.CString(creds.Username) + defer C.free(unsafe.Pointer(username)) + secret := C.CString(creds.Secret) + defer C.free(unsafe.Pointer(secret)) + + if err := C.add(credsLabel, server, username, secret); err != nil { + defer C.g_error_free(err) + errMsg := (*C.char)(unsafe.Pointer(err.message)) + return errors.New(C.GoString(errMsg)) + } + return nil +} + +// Delete removes credentials from the store. +func (h Secretservice) Delete(serverURL string) error { + if serverURL == "" { + return errors.New("missing server url") + } + server := C.CString(serverURL) + defer C.free(unsafe.Pointer(server)) + + if err := C.delete(server); err != nil { + defer C.g_error_free(err) + errMsg := (*C.char)(unsafe.Pointer(err.message)) + return errors.New(C.GoString(errMsg)) + } + return nil +} + +// Get returns the username and secret to use for a given registry server URL. 
+func (h Secretservice) Get(serverURL string) (string, string, error) {
+	if serverURL == "" {
+		return "", "", errors.New("missing server url")
+	}
+	var username *C.char
+	defer C.free(unsafe.Pointer(username))
+	var secret *C.char
+	defer C.free(unsafe.Pointer(secret))
+	server := C.CString(serverURL)
+	defer C.free(unsafe.Pointer(server))
+
+	err := C.get(server, &username, &secret)
+	if err != nil {
+		defer C.g_error_free(err)
+		errMsg := (*C.char)(unsafe.Pointer(err.message))
+		return "", "", errors.New(C.GoString(errMsg))
+	}
+	user := C.GoString(username)
+	pass := C.GoString(secret)
+	if pass == "" {
+		return "", "", credentials.NewErrCredentialsNotFound()
+	}
+	return user, pass, nil
+}
+
+// List returns the stored URLs and corresponding usernames for a given credentials label
+func (h Secretservice) List() (map[string]string, error) {
+	credsLabelC := C.CString(credentials.CredsLabel)
+	defer C.free(unsafe.Pointer(credsLabelC))
+
+	var pathsC **C.char
+	defer C.free(unsafe.Pointer(pathsC))
+	var acctsC **C.char
+	defer C.free(unsafe.Pointer(acctsC))
+	var listLenC C.uint
+	err := C.list(credsLabelC, &pathsC, &acctsC, &listLenC)
+	if err != nil {
+		defer C.free(unsafe.Pointer(err))
+		return nil, errors.New("Error from list function in secretservice_linux.c likely due to error in secretservice library")
+	}
+	defer C.freeListData(&pathsC, listLenC)
+	defer C.freeListData(&acctsC, listLenC)
+
+	resp := make(map[string]string)
+
+	listLen := int(listLenC)
+	if listLen == 0 {
+		return resp, nil
+	}
+	// The maximum capacity of the following two slices is limited to (2^29)-1 to remain compatible
+	// with 32-bit platforms. The size of a `*C.char` (a pointer) is 4 Byte on a 32-bit system
+	// and (2^29)*4 == math.MaxInt32 + 1. -- See issue golang/go#13656
+	pathTmp := (*[(1 << 29) - 1]*C.char)(unsafe.Pointer(pathsC))[:listLen:listLen]
+	acctTmp := (*[(1 << 29) - 1]*C.char)(unsafe.Pointer(acctsC))[:listLen:listLen]
+	for i := 0; i < listLen; i++ {
+		resp[C.GoString(pathTmp[i])] = C.GoString(acctTmp[i])
+	}
+
+	return resp, nil
+}
diff --git a/vendor/github.com/docker/docker-credential-helpers/secretservice/secretservice_linux.h b/vendor/github.com/docker/docker-credential-helpers/secretservice/secretservice_linux.h
new file mode 100644
index 0000000..a28179d
--- /dev/null
+++ b/vendor/github.com/docker/docker-credential-helpers/secretservice/secretservice_linux.h
@@ -0,0 +1,13 @@
+#define SECRET_WITH_UNSTABLE 1
+#define SECRET_API_SUBJECT_TO_CHANGE 1
+#include <libsecret/secret.h>
+
+const SecretSchema *docker_get_schema(void) G_GNUC_CONST;
+
+#define DOCKER_SCHEMA docker_get_schema()
+
+GError *add(char *label, char *server, char *username, char *secret);
+GError *delete(char *server);
+GError *get(char *server, char **username, char **secret);
+GError *list(char *label, char *** paths, char *** accts, unsigned int *list_l);
+void freeListData(char *** data, unsigned int length);
diff --git a/vendor/github.com/docker/docker/AUTHORS b/vendor/github.com/docker/docker/AUTHORS
deleted file mode 100644
index 246e2a3..0000000
--- a/vendor/github.com/docker/docker/AUTHORS
+++ /dev/null
@@ -1,1652 +0,0 @@
-# This file lists all individuals having contributed content to the repository.
-# For how it is generated, see `hack/generate-authors.sh`.
- -Aanand Prasad -Aaron Davidson -Aaron Feng -Aaron Huslage -Aaron Lehmann -Aaron Welch -Abel Muiño -Abhijeet Kasurde -Abhinav Ajgaonkar -Abhishek Chanda -Abin Shahab -Adam Avilla -Adam Kunk -Adam Miller -Adam Mills -Adam Singer -Adam Walz -Aditi Rajagopal -Aditya -Adolfo Ochagavía -Adria Casas -Adrian Moisey -Adrian Mouat -Adrian Oprea -Adrien Folie -Adrien Gallouët -Ahmed Kamal -Ahmet Alp Balkan -Aidan Feldman -Aidan Hobson Sayers -AJ Bowen -Ajey Charantimath -ajneu -Akihiro Suda -Al Tobey -alambike -Alan Scherger -Alan Thompson -Albert Callarisa -Albert Zhang -Aleksa Sarai -Aleksandrs Fadins -Alena Prokharchyk -Alessandro Boch -Alessio Biancalana -Alex Chan -Alex Coventry -Alex Crawford -Alex Ellis -Alex Gaynor -Alex Olshansky -Alex Samorukov -Alex Warhawk -Alexander Artemenko -Alexander Boyd -Alexander Larsson -Alexander Morozov -Alexander Shopov -Alexandre Beslic -Alexandre González -Alexandru Sfirlogea -Alexey Guskov -Alexey Kotlyarov -Alexey Shamrin -Alexis THOMAS -Ali Dehghani -Allen Madsen -Allen Sun -almoehi -Alvaro Saurin -Alvin Richards -amangoel -Amen Belayneh -Amit Bakshi -Amit Krishnan -Amit Shukla -Amy Lindburg -Anand Patil -AnandkumarPatel -Anatoly Borodin -Anchal Agrawal -Anders Janmyr -Andre Dublin <81dublin@gmail.com> -Andre Granovsky -Andrea Luzzardi -Andrea Turli -Andreas Köhler -Andreas Savvides -Andreas Tiefenthaler -Andrei Gherzan -Andrew C. Bodine -Andrew Clay Shafer -Andrew Duckworth -Andrew France -Andrew Gerrand -Andrew Guenther -Andrew Kuklewicz -Andrew Macgregor -Andrew Macpherson -Andrew Martin -Andrew Munsell -Andrew Po -Andrew Weiss -Andrew Williams -Andrews Medina -Andrey Petrov -Andrey Stolbovsky -André Martins -andy -Andy Chambers -andy diller -Andy Goldstein -Andy Kipp -Andy Rothfusz -Andy Smith -Andy Wilson -Anes Hasicic -Anil Belur -Anil Madhavapeddy -Ankush Agarwal -Anonmily -Anthon van der Neut -Anthony Baire -Anthony Bishopric -Anthony Dahanne -Anton Löfgren -Anton Nikitin -Anton Polonskiy -Anton Tiurin -Antonio Murdaca -Antonis Kalipetis -Antony Messerli -Anuj Bahuguna -Anusha Ragunathan -apocas -ArikaChen -Arnaud Lefebvre -Arnaud Porterie -Arthur Barr -Arthur Gautier -Artur Meyster -Arun Gupta -Asbjørn Enge -averagehuman -Avi Das -Avi Miller -Avi Vaid -ayoshitake -Azat Khuyiyakhmetov -Bardia Keyoumarsi -Barnaby Gray -Barry Allard -Bartłomiej Piotrowski -Bastiaan Bakker -bdevloed -Ben Firshman -Ben Golub -Ben Hall -Ben Sargent -Ben Severson -Ben Toews -Ben Wiklund -Benjamin Atkin -Benoit Chesneau -Bernerd Schaefer -Bert Goethals -Bharath Thiruveedula -Bhiraj Butala -Bilal Amarni -Bill W -bin liu -Blake Geno -Boaz Shuster -bobby abbott -boucher -Bouke Haarsma -Boyd Hemphill -boynux -Bradley Cicenas -Bradley Wright -Brandon Liu -Brandon Philips -Brandon Rhodes -Brendan Dixon -Brent Salisbury -Brett Higgins -Brett Kochendorfer -Brian (bex) Exelbierd -Brian Bland -Brian DeHamer -Brian Dorsey -Brian Flad -Brian Goff -Brian McCallister -Brian Olsen -Brian Shumate -Brian Torres-Gil -Brian Trump -Brice Jaglin -Briehan Lombaard -Bruno Bigras -Bruno Binet -Bruno Gazzera -Bruno Renié -Bryan Bess -Bryan Boreham -Bryan Matsuo -Bryan Murphy -buddhamagnet -Burke Libbey -Byung Kang -Caleb Spare -Calen Pennington -Cameron Boehmer -Cameron Spear -Campbell Allen -Candid Dauth -Cao Weiwei -Carl Henrik Lunde -Carl Loa Odin -Carl X. 
Su -Carlos Alexandro Becker -Carlos Sanchez -Carol Fager-Higgins -Cary -Casey Bisson -Cedric Davies -Cezar Sa Espinola -Chad Swenson -Chance Zibolski -Chander G -Charles Chan -Charles Hooper -Charles Law -Charles Lindsay -Charles Merriam -Charles Sarrazin -Charles Smith -Charlie Lewis -Chase Bolt -ChaYoung You -Chen Chao -Chen Hanxiao -cheney90 -Chewey -Chia-liang Kao -chli -Cholerae Hu -Chris Alfonso -Chris Armstrong -Chris Dituri -Chris Fordham -Chris Khoo -Chris McKinnel -Chris Seto -Chris Snow -Chris St. Pierre -Chris Stivers -Chris Swan -Chris Wahl -Chris Weyl -chrismckinnel -Christian Berendt -Christian Böhme -Christian Persson -Christian Rotzoll -Christian Simon -Christian Stefanescu -ChristoperBiscardi -Christophe Mehay -Christophe Troestler -Christopher Currie -Christopher Jones -Christopher Latham -Christopher Rigor -Christy Perez -Chun Chen -Ciro S. Costa -Clayton Coleman -Clinton Kitson -Coenraad Loubser -Colin Dunklau -Colin Rice -Colin Walters -Collin Guarino -Colm Hally -companycy -Cory Forsyth -cressie176 -CrimsonGlory -Cristian Staretu -cristiano balducci -Cruceru Calin-Cristian -Cyril F -Daan van Berkel -Daehyeok Mun -Dafydd Crosby -dalanlan -Damian Smyth -Damien Nadé -Damien Nozay -Damjan Georgievski -Dan Anolik -Dan Buch -Dan Cotora -Dan Feldman -Dan Griffin -Dan Hirsch -Dan Keder -Dan Levy -Dan McPherson -Dan Stine -Dan Walsh -Dan Williams -Daniel Antlinger -Daniel Exner -Daniel Farrell -Daniel Garcia -Daniel Gasienica -Daniel Hiltgen -Daniel Menet -Daniel Mizyrycki -Daniel Nephin -Daniel Norberg -Daniel Nordberg -Daniel Robinson -Daniel S -Daniel Von Fange -Daniel X Moore -Daniel YC Lin -Daniel Zhang -Daniel, Dao Quang Minh -Danny Berger -Danny Yates -Darren Coxall -Darren Shepherd -Darren Stahl -Davanum Srinivas -Dave Barboza -Dave Henderson -Dave MacDonald -Dave Tucker -David Anderson -David Calavera -David Corking -David Cramer -David Currie -David Davis -David Dooling -David Gageot -David Gebler -David Lawrence -David Lechner -David M. Karr -David Mackey -David Mat -David Mcanulty -David Pelaez -David R. Jenni -David Röthlisberger -David Sheets -David Sissitka -David Trott -David Xia -David Young -Davide Ceretti -Dawn Chen -dbdd -dcylabs -decadent -deed02392 -Deng Guangxing -Deni Bertovic -Denis Gladkikh -Denis Ollier -Dennis Docter -Derek -Derek -Derek Ch -Derek McGowan -Deric Crago -Deshi Xiao -devmeyster -Devvyn Murphy -Dharmit Shah -Dieter Reuter -Dillon Dixon -Dima Stopel -Dimitri John Ledkov -Dimitris Rozakis -Dimitry Andric -Dinesh Subhraveti -Diogo Monica -DiuDiugirl -Djibril Koné -dkumor -Dmitri Logvinenko -Dmitri Shuralyov -Dmitry Demeshchuk -Dmitry Gusev -Dmitry Smirnov -Dmitry V. 
Krivenok -Dmitry Vorobev -Dolph Mathews -Dominik Finkbeiner -Dominik Honnef -Don Kirkby -Don Kjer -Don Spaulding -Donald Huang -Dong Chen -Donovan Jones -Doron Podoleanu -Doug Davis -Doug MacEachern -Doug Tangren -Dr Nic Williams -dragon788 -Dražen Lučanin -Drew Erny -Dustin Sallings -Ed Costello -Edmund Wagner -Eiichi Tsukata -Eike Herzbach -Eivin Giske Skaaren -Eivind Uggedal -Elan Ruusamäe -Elias Probst -Elijah Zupancic -eluck -Elvir Kuric -Emil Hernvall -Emily Maier -Emily Rose -Emir Ozer -Enguerran -Eohyung Lee -Eric Barch -Eric Hanchrow -Eric Lee -Eric Myhre -Eric Paris -Eric Rafaloff -Eric Rosenberg -Eric Sage -Eric Windisch -Eric Yang -Eric-Olivier Lamey -Erik Bray -Erik Dubbelboer -Erik Hollensbe -Erik Inge Bolsø -Erik Kristensen -Erik Weathers -Erno Hopearuoho -Erwin van der Koogh -Euan -Eugene Yakubovich -eugenkrizo -evalle -Evan Allrich -Evan Carmi -Evan Hazlett -Evan Krall -Evan Phoenix -Evan Wies -Everett Toews -Evgeny Vereshchagin -Ewa Czechowska -Eystein Måløy Stenberg -ezbercih -Fabiano Rosas -Fabio Falci -Fabio Rapposelli -Fabio Rehm -Fabrizio Regini -Fabrizio Soppelsa -Faiz Khan -falmp -Fangyuan Gao <21551127@zju.edu.cn> -Fareed Dudhia -Fathi Boudra -Federico Gimenez -Felix Geisendörfer -Felix Hupfeld -Felix Rabe -Felix Ruess -Felix Schindler -Ferenc Szabo -Fernando -Fero Volar -Ferran Rodenas -Filipe Brandenburger -Filipe Oliveira -fl0yd -Flavio Castelli -FLGMwt -Florian -Florian Klein -Florian Maier -Florian Weingarten -Florin Asavoaie -fonglh -fortinux -Francesc Campoy -Francis Chuang -Francisco Carriedo -Francisco Souza -Frank Groeneveld -Frank Herrmann -Frank Macreery -Frank Rosquin -Fred Lifton -Frederick F. Kautz IV -Frederik Loeffert -Frederik Nordahl Jul Sabroe -Freek Kalter -frosforever -fy2462 -Félix Baylac-Jacqué -Félix Cantournet -Gabe Rosenhouse -Gabor Nagy -Gabriel Monroy -GabrielNicolasAvellaneda -Galen Sampson -Gareth Rushgrove -Garrett Barboza -Gaurav -gautam, prasanna -GennadySpb -Geoffrey Bachelet -George MacRorie -George Xie -Georgi Hristozov -Gereon Frey -German DZ -Gert van Valkenhoef -Gianluca Borello -Gildas Cuisinier -gissehel -Giuseppe Mazzotta -Gleb Fotengauer-Malinovskiy -Gleb M Borisov -Glyn Normington -GoBella -Goffert van Gool -Gosuke Miyashita -Gou Rao -Govinda Fichtner -Grant Reaber -Graydon Hoare -Greg Fausak -Greg Thornton -grossws -grunny -gs11 -Guilhem Lettron -Guilherme Salgado -Guillaume Dufour -Guillaume J. Charmes -guoxiuyan -Gurjeet Singh -Guruprasad -gwx296173 -Günter Zöchbauer -Hans Kristian Flaatten -Hans Rødtang -Hao Shu Wei -Hao Zhang <21521210@zju.edu.cn> -Harald Albers -Harley Laue -Harold Cooper -Harry Zhang -He Simei -heartlock <21521209@zju.edu.cn> -Hector Castro -Henning Sprang -Hobofan -Hollie Teal -Hong Xu -hsinko <21551195@zju.edu.cn> -Hu Keping -Hu Tao -Huanzhong Zhang -Huayi Zhang -Hugo Duncan -Hugo Marisco <0x6875676f@gmail.com> -Hunter Blanks -huqun -Huu Nguyen -hyeongkyu.lee -hyp3rdino -Hyzhou <1187766782@qq.com> -Ian Babrou -Ian Bishop -Ian Bull -Ian Calvert -Ian Lee -Ian Main -Ian Truslove -Iavael -Icaro Seara -Igor Dolzhikov -Ilkka Laukkanen -Ilya Dmitrichenko -Ilya Gusev -ILYA Khlopotov -imre Fitos -inglesp -Ingo Gottwald -Isaac Dupree -Isabel Jimenez -Isao Jonas -Ivan Babrou -Ivan Fraixedes -Ivan Grcic -J Bruni -J. 
Nunn -Jack Danger Canty -Jacob Atzen -Jacob Edelman -Jake Champlin -Jake Moshenko -jakedt -James Allen -James Carey -James Carr -James DeFelice -James Harrison Fisher -James Kyburz -James Kyle -James Lal -James Mills -James Nugent -James Turnbull -Jamie Hannaford -Jamshid Afshar -Jan Keromnes -Jan Koprowski -Jan Pazdziora -Jan Toebes -Jan-Gerd Tenberge -Jan-Jaap Driessen -Jana Radhakrishnan -Jannick Fahlbusch -Januar Wayong -Jared Biel -Jared Hocutt -Jaroslaw Zabiello -jaseg -Jasmine Hegman -Jason Divock -Jason Giedymin -Jason Green -Jason Hall -Jason Heiss -Jason Livesay -Jason McVetta -Jason Plum -Jason Shepherd -Jason Smith -Jason Sommer -Jason Stangroome -jaxgeller -Jay -Jay -Jay Kamat -Jean-Baptiste Barth -Jean-Baptiste Dalido -Jean-Paul Calderone -Jean-Tiare Le Bigot -Jeff Anderson -Jeff Johnston -Jeff Lindsay -Jeff Mickey -Jeff Minard -Jeff Nickoloff -Jeff Silberman -Jeff Welch -Jeffrey Bolle -Jeffrey Morgan -Jeffrey van Gogh -Jenny Gebske -Jeremy Grosser -Jeremy Price -Jeremy Qian -Jeremy Unruh -Jeroen Jacobs -Jesse Dearing -Jesse Dubay -Jessica Frazelle -Jezeniel Zapanta -jgeiger -Jhon Honce -Ji.Zhilong -Jian Zhang -jianbosun -Jilles Oldenbeuving -Jim Alateras -Jim Perrin -Jimmy Cuadra -Jimmy Puckett -jimmyxian -Jinsoo Park -Jiri Popelka -Jiří Župka -jjy -jmzwcn -Joao Fernandes -Joe Beda -Joe Doliner -Joe Ferguson -Joe Gordon -Joe Shaw -Joe Van Dyk -Joel Friedly -Joel Handwell -Joel Hansson -Joel Wurtz -Joey Geiger -Joey Gibson -Joffrey F -Johan Euphrosine -Johan Rydberg -Johanan Lieberman -Johannes 'fish' Ziemke -John Costa -John Feminella -John Gardiner Myers -John Gossman -John Howard (VM) -John OBrien III -John Starks -John Tims -John Warwick -John Willis -johnharris85 -Jon Wedaman -Jonas Pfenniger -Jonathan A. Sternberg -Jonathan Boulle -Jonathan Camp -Jonathan Dowland -Jonathan Lebon -Jonathan Lomas -Jonathan McCrohan -Jonathan Mueller -Jonathan Pares -Jonathan Rudenberg -Jonathan Stoppani -Joost Cassee -Jordan -Jordan Arentsen -Jordan Sissel -Jose Diaz-Gonzalez -Joseph Anthony Pasquale Holsten -Joseph Hager -Joseph Kern -Josh -Josh Bodah -Josh Chorlton -Josh Hawn -Josh Horwitz -Josh Poimboeuf -Josiah Kiehl -José Tomás Albornoz -JP -jrabbit -Julian Taylor -Julien Barbier -Julien Bisconti -Julien Bordellier -Julien Dubois -Julien Pervillé -Julio Montes -Jun-Ru Chang -Jussi Nummelin -Justas Brazauskas -Justin Cormack -Justin Force -Justin Plock -Justin Simonelis -Justin Terry -Justyn Temme -Jyrki Puttonen -Jérôme Petazzoni -Jörg Thalheim -Kai Blin -Kai Qiang Wu(Kennan) -Kamil Domański -kamjar gerami -Kanstantsin Shautsou -Kara Alexandra -Karan Lyons -Kareem Khazem -kargakis -Karl Grzeszczak -Karol Duleba -Katie McLaughlin -Kato Kazuyoshi -Katrina Owen -Kawsar Saiyeed -kayrus -Ke Xu -Keith Hudgins -Keli Hu -Ken Cochrane -Ken Herner -Ken ICHIKAWA -Kenfe-Mickaël Laventure -Kenjiro Nakayama -Kent Johnson -Kevin "qwazerty" Houdebert -Kevin Burke -Kevin Clark -Kevin J. Lynagh -Kevin Jing Qiu -Kevin Menard -Kevin P. 
Kucharczyk -Kevin Richardson -Kevin Shi -Kevin Wallace -Kevin Yap -kevinmeredith -Keyvan Fatehi -kies -Kim BKC Carlbacker -Kim Eik -Kimbro Staken -Kir Kolyshkin -Kiran Gangadharan -Kirill Kolyshkin -Kirill SIbirev -knappe -Kohei Tsuruta -Koichi Shiraishi -Konrad Kleine -Konstantin L -Konstantin Pelykh -Krasimir Georgiev -Kris-Mikael Krister -Kristian Haugene -Kristina Zabunova -krrg -Kun Zhang -Kunal Kushwaha -Kyle Conroy -Kyle Linden -kyu -Lachlan Coote -Lai Jiangshan -Lajos Papp -Lakshan Perera -Lalatendu Mohanty -lalyos -Lance Chen -Lance Kinley -Lars Butler -Lars Kellogg-Stedman -Lars R. Damerow -Laszlo Meszaros -Laurent Erignoux -Laurie Voss -Leandro Siqueira -Lee Chao <932819864@qq.com> -Lee, Meng-Han -leeplay -Lei Jitang -Len Weincier -Lennie -Leszek Kowalski -Levi Blackstone -Levi Gross -Lewis Marshall -Lewis Peckover -Liam Macgillavry -Liana Lo -Liang Mingqiang -Liang-Chi Hsieh -liaoqingwei -limsy -Lin Lu -LingFaKe -Linus Heckemann -Liran Tal -Liron Levin -Liu Bo -Liu Hua -lixiaobing10051267 -LIZAO LI -Lloyd Dewolf -Lokesh Mandvekar -longliqiang88 <394564827@qq.com> -Lorenz Leutgeb -Lorenzo Fontana -Louis Opter -Luca Marturana -Luca Orlandi -Luca-Bogdan Grigorescu -Lucas Chan -Lucas Chi -Luciano Mores -Luis Martínez de Bartolomé Izquierdo -Lukas Waslowski -lukaspustina -Lukasz Zajaczkowski -lukemarsden -Lynda O'Leary -Lénaïc Huard -Ma Shimiao -Mabin -Madhav Puri -Madhu Venugopal -Mageee <21521230.zju.edu.cn> -Mahesh Tiyyagura -malnick -Malte Janduda -manchoz -Manfred Touron -Manfred Zabarauskas -Mansi Nahar -mansinahar -Manuel Meurer -Manuel Woelker -mapk0y -Marc Abramowitz -Marc Kuo -Marc Tamsky -Marcelo Salazar -Marco Hennings -Marcus Farkas -Marcus Linke -Marcus Ramberg -Marek Goldmann -Marian Marinov -Marianna Tessel -Mario Loriedo -Marius Gundersen -Marius Sturm -Marius Voila -Mark Allen -Mark McGranaghan -Mark McKinstry -Mark West -Marko Mikulicic -Marko Tibold -Markus Fix -Martijn Dwars -Martijn van Oosterhout -Martin Honermeyer -Martin Kelly -Martin Mosegaard Amdisen -Martin Redmond -Mary Anthony -Masahito Zembutsu -Mason Malone -Mateusz Sulima -Mathias Monnerville -Mathieu Le Marec - Pasquet -Matt Apperson -Matt Bachmann -Matt Bentley -Matt Haggard -Matt Hoyle -Matt McCormick -Matt Moore -Matt Richardson -Matt Robenolt -Matthew Heon -Matthew Mayer -Matthew Mueller -Matthew Riley -Matthias Klumpp -Matthias Kühnle -Matthias Rampke -Matthieu Hauglustaine -mattymo -mattyw -Mauricio Garavaglia -mauriyouth -Max Shytikov -Maxim Fedchyshyn -Maxim Ivanov -Maxim Kulkin -Maxim Treskin -Maxime Petazzoni -Meaglith Ma -meejah -Megan Kostick -Mehul Kar -Mei ChunTao -Mengdi Gao -Mert Yazıcıoğlu -mgniu -Micah Zoltu -Michael A. 
Smith -Michael Bridgen -Michael Brown -Michael Chiang -Michael Crosby -Michael Currie -Michael Friis -Michael Gorsuch -Michael Grauer -Michael Holzheu -Michael Hudson-Doyle -Michael Huettermann -Michael Käufl -Michael Neale -Michael Prokop -Michael Scharf -Michael Stapelberg -Michael Steinert -Michael Thies -Michael West -Michal Fojtik -Michal Gebauer -Michal Jemala -Michal Minar -Michal Wieczorek -Michaël Pailloncy -Michał Czeraszkiewicz -Michiel@unhosted -Mickaël FORTUNATO -Miguel Angel Fernández -Miguel Morales -Mihai Borobocea -Mihuleacc Sergiu -Mike Brown -Mike Chelen -Mike Danese -Mike Dillon -Mike Dougherty -Mike Gaffney -Mike Goelzer -Mike Leone -Mike MacCana -Mike Naberezny -Mike Snitzer -mikelinjie <294893458@qq.com> -Mikhail Sobolev -Miloslav Trmač -mingqing -Mingzhen Feng -Misty Stanley-Jones -Mitch Capper -mlarcher -Mohammad Banikazemi -Mohammed Aaqib Ansari -Mohit Soni -Morgan Bauer -Morgante Pell -Morgy93 -Morten Siebuhr -Morton Fox -Moysés Borges -mqliang -Mrunal Patel -msabansal -mschurenko -muge -Mustafa Akın -Muthukumar R -Máximo Cuadros -Médi-Rémi Hashim -Nahum Shalman -Nakul Pathak -Nalin Dahyabhai -Nan Monnand Deng -Naoki Orii -Natalie Parker -Natanael Copa -Nate Brennand -Nate Eagleson -Nate Jones -Nathan Hsieh -Nathan Kleyn -Nathan LeClaire -Nathan McCauley -Nathan Williams -Neal McBurnett -Neil Peterson -Nelson Chen -Neyazul Haque -Nghia Tran -Niall O'Higgins -Nicholas E. Rabenau -nick -Nick DeCoursin -Nick Irvine -Nick Parker -Nick Payne -Nick Stenning -Nick Stinemates -Nicola Kabar -Nicolas Borboën -Nicolas De loof -Nicolas Dudebout -Nicolas Goy -Nicolas Kaiser -Nicolás Hock Isaza -Nigel Poulton -NikolaMandic -nikolas -Nirmal Mehta -Nishant Totla -NIWA Hideyuki -noducks -Nolan Darilek -nponeccop -Nuutti Kotivuori -nzwsch -O.S. Tezer -objectified -OddBloke -odk- -Oguz Bilgic -Oh Jinkyun -Ohad Schneider -ohmystack -Ole Reifschneider -Oliver Neal -Olivier Gambier -Olle Jonsson -Oriol Francès -orkaa -Oskar Niburski -Otto Kekäläinen -oyld -ozlerhakan -paetling -pandrew -panticz -Paolo G. 
Giarrusso -Pascal Borreli -Pascal Hartig -Patrick Böänziger -Patrick Devine -Patrick Hemmer -Patrick Stapleton -pattichen -Paul -paul -Paul Annesley -Paul Bellamy -Paul Bowsher -Paul Furtado -Paul Hammond -Paul Jimenez -Paul Lietar -Paul Liljenberg -Paul Morie -Paul Nasrat -Paul Weaver -Paulo Ribeiro -Pavel Lobashov -Pavel Pospisil -Pavel Sutyrin -Pavel Tikhomirov -Pavlos Ratis -Pavol Vargovcik -Peeyush Gupta -Peggy Li -Pei Su -Penghan Wang -perhapszzy@sina.com -pestophagous -Peter Bourgon -Peter Braden -Peter Choi -Peter Dave Hello -Peter Edge -Peter Ericson -Peter Esbensen -Peter Malmgren -Peter Salvatore -Peter Volpe -Peter Waller -Petr Švihlík -Phil -Phil Estes -Phil Spitler -Philip Monroe -Philipp Wahala -Philipp Weissensteiner -Phillip Alexander -pidster -Piergiuliano Bossi -Pierre -Pierre Carrier -Pierre Dal-Pra -Pierre Wacrenier -Pierre-Alain RIVIERE -Piotr Bogdan -pixelistik -Porjo -Poul Kjeldager Sørensen -Pradeep Chhetri -Prasanna Gautam -Prayag Verma -Przemek Hejman -pysqz -qg <1373319223@qq.com> -qhuang -Qiang Huang -qq690388648 <690388648@qq.com> -Quentin Brossard -Quentin Perez -Quentin Tayssier -r0n22 -Rafal Jeczalik -Rafe Colton -Raghavendra K T -Raghuram Devarakonda -Rajat Pandit -Rajdeep Dua -Ralf Sippl -Ralle -Ralph Bean -Ramkumar Ramachandra -Ramon Brooker -Ramon van Alteren -Ray Tsang -ReadmeCritic -Recursive Madman -Regan McCooey -Remi Rampin -Renato Riccieri Santos Zannon -resouer -rgstephens -Rhys Hiltner -Rich Moyse -Rich Seymour -Richard -Richard Burnison -Richard Harvey -Richard Mathie -Richard Metzler -Richard Scothern -Richo Healey -Rick Bradley -Rick van de Loo -Rick Wieman -Rik Nijessen -Riku Voipio -Riley Guerin -Ritesh H Shukla -Riyaz Faizullabhoy -Rob Vesse -Robert Bachmann -Robert Bittle -Robert Obryk -Robert Stern -Robert Terhaar -Robert Wallis -Roberto G. Hashioka -Robin Naundorf -Robin Schneider -Robin Speekenbrink -robpc -Rodolfo Carvalho -Rodrigo Vaz -Roel Van Nyen -Roger Peppe -Rohit Jnagal -Rohit Kadam -Roland Huß -Roland Kammerer -Roland Moriz -Roma Sokolov -Roman Strashkin -Ron Smits -Ron Williams -root -root -root -root -root -Rory Hunter -Rory McCune -Ross Boucher -Rovanion Luckey -Rozhnov Alexandr -rsmoorthy -Rudolph Gottesheim -Rui Lopes -Runshen Zhu -Ryan Anderson -Ryan Aslett -Ryan Belgrave -Ryan Detzel -Ryan Fowler -Ryan McLaughlin -Ryan O'Donnell -Ryan Seto -Ryan Thomas -Ryan Trauntvein -Ryan Wallner -RyanDeng -Rémy Greinhofer -s. rannou -s00318865 -Sabin Basyal -Sachin Joshi -Sagar Hani -Sainath Grandhi -sakeven -Sally O'Malley -Sam Abed -Sam Alba -Sam Bailey -Sam J Sharpe -Sam Neirinck -Sam Reis -Sam Rijs -Sambuddha Basu -Sami Wagiaalla -Samuel Andaya -Samuel Dion-Girardeau -Samuel Karp -Samuel PHAN -Sankar சங்கர் -Sanket Saurav -Santhosh Manohar -sapphiredev -Satnam Singh -satoru -Satoshi Amemiya -Satoshi Tagomori -scaleoutsean -Scott Bessler -Scott Collier -Scott Johnston -Scott Stamp -Scott Walls -sdreyesg -Sean Christopherson -Sean Cronin -Sean OMeara -Sean P. 
Kane -Sebastiaan van Steenis -Sebastiaan van Stijn -Senthil Kumar Selvaraj -Senthil Kumaran -SeongJae Park -Seongyeol Lim -Serge Hallyn -Sergey Alekseev -Sergey Evstifeev -Serhat Gülçiçek -Sevki Hasirci -Shane Canon -Shane da Silva -shaunol -Shawn Landden -Shawn Siefkas -shawnhe -Shekhar Gulati -Sheng Yang -Shengbo Song -Shev Yan -Shih-Yuan Lee -Shijiang Wei -Shishir Mahajan -Shoubhik Bose -Shourya Sarcar -shuai-z -Shukui Yang -Shuwei Hao -Sian Lerk Lau -sidharthamani -Silas Sewell -Simei He -Simon Eskildsen -Simon Leinen -Simon Taranto -Sindhu S -Sjoerd Langkemper -skaasten -Solganik Alexander -Solomon Hykes -Song Gao -Soshi Katsuta -Soulou -Spencer Brown -Spencer Smith -Sridatta Thatipamala -Sridhar Ratnakumar -Srini Brahmaroutu -srinsriv -Steeve Morin -Stefan Berger -Stefan J. Wernli -Stefan Praszalowicz -Stefan Scherer -Stefan Staudenmeyer -Stefan Weil -Stephen Crosby -Stephen Day -Stephen Drake -Stephen Rust -Steve Durrheimer -Steve Francia -Steve Koch -Steven Burgess -Steven Erenst -Steven Iveson -Steven Merrill -Steven Richards -Steven Taylor -Subhajit Ghosh -Sujith Haridasan -Suryakumar Sudar -Sven Dowideit -Swapnil Daingade -Sylvain Baubeau -Sylvain Bellemare -Sébastien -Sébastien Luttringer -Sébastien Stormacq -Tadej Janež -TAGOMORI Satoshi -tang0th -Tangi COLIN -Tatsuki Sugiura -Tatsushi Inagaki -Taylor Jones -tbonza -Ted M. Young -Tehmasp Chaudhri -Tejesh Mehta -terryding77 <550147740@qq.com> -tgic -Thatcher Peskens -theadactyl -Thell 'Bo' Fowler -Thermionix -Thijs Terlouw -Thomas Bikeev -Thomas Frössman -Thomas Gazagnaire -Thomas Grainger -Thomas Hansen -Thomas Leonard -Thomas LEVEIL -Thomas Orozco -Thomas Riccardi -Thomas Schroeter -Thomas Sjögren -Thomas Swift -Thomas Tanaka -Thomas Texier -Tianon Gravi -Tianyi Wang -Tibor Vass -Tiffany Jernigan -Tiffany Low -Tim Bosse -Tim Dettrick -Tim Düsterhus -Tim Hockin -Tim Ruffles -Tim Smith -Tim Terhorst -Tim Wang -Tim Waugh -Tim Wraight -timfeirg -Timothy Hobbs -tjwebb123 -tobe -Tobias Bieniek -Tobias Bradtke -Tobias Gesellchen -Tobias Klauser -Tobias Munk -Tobias Schmidt -Tobias Schwab -Todd Crane -Todd Lunter -Todd Whiteman -Toli Kuznets -Tom Barlow -Tom Denham -Tom Fotherby -Tom Howe -Tom Hulihan -Tom Maaswinkel -Tom X. Tobin -Tomas Tomecek -Tomasz Kopczynski -Tomasz Lipinski -Tomasz Nurkiewicz -Tommaso Visconti -Tomáš Hrčka -Tonis Tiigi -Tonny Xu -Tony Daws -Tony Miller -toogley -Torstein Husebø -tpng -tracylihui <793912329@qq.com> -Travis Cline -Travis Thieman -Trent Ogren -Trevor -Trevor Pounds -trishnaguha -Tristan Carel -Troy Denton -Tyler Brock -Tzu-Jung Lee -Tõnis Tiigi -Ulysse Carion -unknown -vagrant -Vaidas Jablonskis -Veres Lajos -vgeta -Victor Algaze -Victor Coisne -Victor Costan -Victor I. 
Wood -Victor Lyuboslavsky -Victor Marmol -Victor Palma -Victor Vieux -Victoria Bialas -Vijaya Kumar K -Viktor Stanchev -Viktor Vojnovski -VinayRaghavanKS -Vincent Batts -Vincent Bernat -Vincent Bernat -Vincent Demeester -Vincent Giersch -Vincent Mayers -Vincent Woo -Vinod Kulkarni -Vishal Doshi -Vishnu Kannan -Vitor Monteiro -Vivek Agarwal -Vivek Dasgupta -Vivek Goyal -Vladimir Bulyga -Vladimir Kirillov -Vladimir Pouzanov -Vladimir Rutsky -Vladimir Varankin -VladimirAus -Vojtech Vitek (V-Teq) -waitingkuo -Walter Leibbrandt -Walter Stanish -WANG Chao -Wang Xing -Ward Vandewege -WarheadsSE -Wayne Chang -Wei-Ting Kuo -weiyan -Weiyang Zhu -Wen Cheng Ma -Wendel Fleming -Wenkai Yin -Wenxuan Zhao -Wenyu You <21551128@zju.edu.cn> -Wes Morgan -Will Dietz -Will Rouesnel -Will Weaver -willhf -William Delanoue -William Henry -William Hubbs -William Riancho -William Thurston -WiseTrem -wlan0 -Wolfgang Powisch -wonderflow -Wonjun Kim -xamyzhao -Xianlu Bird -XiaoBing Jiang -Xiaoxu Chen -xiekeyang -Xinzi Zhou -Xiuming Chen -xlgao-zju -xuzhaokui -Yahya -YAMADA Tsuyoshi -Yan Feng -Yang Bai -yangshukui -Yanqiang Miao -Yasunori Mahata -Yestin Sun -Yi EungJun -Yibai Zhang -Yihang Ho -Ying Li -Yohei Ueda -Yong Tang -Yongzhi Pan -yorkie -Youcef YEKHLEF -Yuan Sun -yuchangchun -yuchengxia -yuexiao-wang -YuPengZTE -Yurii Rashkovskii -yuzou -Zac Dover -Zach Borboa -Zachary Jaffee -Zain Memon -Zaiste! -Zane DeGraffenried -Zefan Li -Zen Lin(Zhinan Lin) -Zhang Kun -Zhang Wei -Zhang Wentao -Zhenan Ye <21551168@zju.edu.cn> -zhouhao -Zhu Guihua -Zhuoyun Wei -Zilin Du -zimbatm -Ziming Dong -ZJUshuaizhou <21551191@zju.edu.cn> -zmarouf -Zoltan Tombol -zqh -Zuhayr Elahi -Zunayed Ali -Álex González -Álvaro Lázaro -Átila Camurça Alves -尹吉峰 -搏通 diff --git a/vendor/github.com/docker/docker/LICENSE b/vendor/github.com/docker/docker/LICENSE index 8f3fee6..9c8e20a 100644 --- a/vendor/github.com/docker/docker/LICENSE +++ b/vendor/github.com/docker/docker/LICENSE @@ -176,7 +176,7 @@ END OF TERMS AND CONDITIONS - Copyright 2013-2016 Docker, Inc. + Copyright 2013-2017 Docker, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/github.com/docker/docker/NOTICE b/vendor/github.com/docker/docker/NOTICE index 8a37c1c..0c74e15 100644 --- a/vendor/github.com/docker/docker/NOTICE +++ b/vendor/github.com/docker/docker/NOTICE @@ -1,5 +1,5 @@ Docker -Copyright 2012-2016 Docker, Inc. +Copyright 2012-2017 Docker, Inc. This product includes software developed at Docker, Inc. (https://www.docker.com). diff --git a/vendor/github.com/docker/docker/README.md b/vendor/github.com/docker/docker/README.md new file mode 100644 index 0000000..534fd97 --- /dev/null +++ b/vendor/github.com/docker/docker/README.md @@ -0,0 +1,57 @@ +The Moby Project +================ + +![Moby Project logo](docs/static_files/moby-project-logo.png "The Moby Project") + +Moby is an open-source project created by Docker to enable and accelerate software containerization. + +It provides a "Lego set" of toolkit components, the framework for assembling them into custom container-based systems, and a place for all container enthusiasts and professionals to experiment and exchange ideas. +Components include container build tools, a container registry, orchestration tools, a runtime and more, and these can be used as building blocks in conjunction with other tools and projects. 
+
+## Principles
+
+Moby is an open project guided by strong principles, aiming to be modular, flexible and without too strong an opinion on user experience.
+It is open to the community to help set its direction.
+
+- Modular: the project includes lots of components that have well-defined functions and APIs that work together.
+- Batteries included but swappable: Moby includes enough components to build a fully featured container system, but its modular architecture ensures that most of the components can be swapped out for different implementations.
+- Usable security: Moby provides secure defaults without compromising usability.
+- Developer focused: The APIs are intended to be functional and useful to build powerful tools.
+They are not necessarily intended as end user tools but as components aimed at developers.
+Documentation and UX are aimed at developers, not end users.
+
+## Audience
+
+The Moby Project is intended for engineers, integrators and enthusiasts looking to modify, hack, fix, experiment, invent and build systems based on containers.
+It is not for people looking for a commercially supported system, but for people who want to work and learn with open source code.
+
+## Relationship with Docker
+
+The components and tools in the Moby Project are initially the open source components that Docker and the community have built for the Docker Project.
+New projects can be added if they fit with the community goals. Docker is committed to using Moby as the upstream for the Docker Product.
+However, other projects are also encouraged to use Moby as an upstream, and to reuse the components in diverse ways, and all these uses will be treated in the same way. External maintainers and contributors are welcomed.
+
+The Moby project is not intended as a location for support or feature requests for Docker products, but as a place for contributors to work on open source code, fix bugs, and make the code more useful.
+The releases are supported by the maintainers, community and users, on a best-efforts basis only, and are not intended for customers who want enterprise or commercial support; Docker EE is the appropriate product for these use cases.
+
+-----
+
+Legal
+=====
+
+*Brought to you courtesy of our legal counsel. For more context,
+please see the [NOTICE](https://github.com/moby/moby/blob/master/NOTICE) document in this repo.*
+
+Use and transfer of Moby may be subject to certain restrictions by the
+United States and other governments.
+
+It is your responsibility to ensure that your use and/or transfer does not
+violate applicable laws.
+
+For more information, please see https://www.bis.doc.gov
+
+Licensing
+=========
+Moby is licensed under the Apache License, Version 2.0. See
+[LICENSE](https://github.com/moby/moby/blob/master/LICENSE) for the full
+license text.
diff --git a/vendor/github.com/docker/docker/api/README.md b/vendor/github.com/docker/docker/api/README.md
new file mode 100644
index 0000000..f136c34
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/README.md
@@ -0,0 +1,42 @@
+# Working on the Engine API
+
+The Engine API is an HTTP API used by the command-line client to communicate with the daemon. It can also be used by third-party software to control the daemon.
+
+It consists of various components in this repository:
+
+- `api/swagger.yaml` A Swagger definition of the API.
+- `api/types/` Types shared by both the client and server, representing various objects, options, responses, etc. Most are written manually, but some are automatically generated from the Swagger definition. See [#27919](https://github.com/docker/docker/issues/27919) for progress on this.
+- `cli/` The command-line client.
+- `client/` The Go client used by the command-line client. It can also be used by third-party Go programs.
+- `daemon/` The daemon, which serves the API.
+
+## Swagger definition
+
+The API is defined by the [Swagger](http://swagger.io/specification/) definition in `api/swagger.yaml`. This definition can be used to:
+
+1. Automatically generate documentation.
+2. Automatically generate the Go server and client. (A work-in-progress.)
+3. Provide a machine readable version of the API for introspecting what it can do, automatically generating clients for other languages, etc.
+
+## Updating the API documentation
+
+The API documentation is generated entirely from `api/swagger.yaml`. If you make updates to the API, edit this file to represent the change in the documentation.
+
+The file is split into two main sections:
+
+- `definitions`, which defines re-usable objects used in requests and responses
+- `paths`, which defines the API endpoints (and some inline objects which don't need to be reusable)
+
+To make an edit, first look for the endpoint you want to edit under `paths`, then make the required edits. Endpoints may reference reusable objects with `$ref`, which can be found in the `definitions` section.
+
+There is hopefully enough example material in the file for you to copy a similar pattern from elsewhere in the file (e.g. adding new fields or endpoints), but for the full reference, see the [Swagger specification](http://swagger.io/specification/).
+
+`swagger.yaml` is validated by `hack/validate/swagger` to ensure it is a valid Swagger definition. This is useful when making edits to ensure you are doing the right thing.
+
+## Viewing the API documentation
+
+When you make edits to `swagger.yaml`, you may want to check the generated API documentation to ensure it renders correctly.
+
+Run `make swagger-docs` and a preview will be running at `http://localhost`. Some of the styling may be incorrect, but you'll be able to ensure that it is generating the correct documentation.
+
+The production documentation is generated by vendoring `swagger.yaml` into [docker/docker.github.io](https://github.com/docker/docker.github.io).
diff --git a/vendor/github.com/docker/docker/api/common.go b/vendor/github.com/docker/docker/api/common.go
new file mode 100644
index 0000000..beb251a
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/common.go
@@ -0,0 +1,11 @@
+package api // import "github.com/docker/docker/api"
+
+// Common constants for daemon and client.
+const (
+	// DefaultVersion of Current REST API
+	DefaultVersion string = "1.37"
+
+	// NoBaseImageSpecifier is the symbol used by the FROM
+	// command to specify that no base image is to be used.
+ NoBaseImageSpecifier string = "scratch" +) diff --git a/vendor/github.com/docker/docker/api/common_unix.go b/vendor/github.com/docker/docker/api/common_unix.go new file mode 100644 index 0000000..af1a541 --- /dev/null +++ b/vendor/github.com/docker/docker/api/common_unix.go @@ -0,0 +1,6 @@ +// +build !windows + +package api // import "github.com/docker/docker/api" + +// MinVersion represents Minimum REST API version supported +const MinVersion string = "1.12" diff --git a/vendor/github.com/docker/docker/api/common_windows.go b/vendor/github.com/docker/docker/api/common_windows.go new file mode 100644 index 0000000..590ba54 --- /dev/null +++ b/vendor/github.com/docker/docker/api/common_windows.go @@ -0,0 +1,8 @@ +package api // import "github.com/docker/docker/api" + +// MinVersion represents Minimum REST API version supported +// Technically the first daemon API version released on Windows is v1.25 in +// engine version 1.13. However, some clients are explicitly using downlevel +// APIs (e.g. docker-compose v2.1 file format) and that is just too restrictive. +// Hence also allowing 1.24 on Windows. +const MinVersion string = "1.24" diff --git a/vendor/github.com/docker/docker/api/types/auth.go b/vendor/github.com/docker/docker/api/types/auth.go index 056af6b..ddf15bb 100644 --- a/vendor/github.com/docker/docker/api/types/auth.go +++ b/vendor/github.com/docker/docker/api/types/auth.go @@ -1,4 +1,4 @@ -package types +package types // import "github.com/docker/docker/api/types" // AuthConfig contains authorization information for connecting to a Registry type AuthConfig struct { diff --git a/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go b/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go index 931ae10..bf3463b 100644 --- a/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go +++ b/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go @@ -1,4 +1,4 @@ -package blkiodev +package blkiodev // import "github.com/docker/docker/api/types/blkiodev" import "fmt" diff --git a/vendor/github.com/docker/docker/api/types/client.go b/vendor/github.com/docker/docker/api/types/client.go index 7900d64..18b36d5 100644 --- a/vendor/github.com/docker/docker/api/types/client.go +++ b/vendor/github.com/docker/docker/api/types/client.go @@ -1,14 +1,13 @@ -package types +package types // import "github.com/docker/docker/api/types" import ( "bufio" "io" "net" - "os" "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/filters" - "github.com/docker/go-units" + units "github.com/docker/go-units" ) // CheckpointCreateOptions holds parameters to create a checkpoint from a container @@ -75,6 +74,7 @@ type ContainerLogsOptions struct { ShowStdout bool ShowStderr bool Since string + Until string Timestamps bool Follow bool Tail string @@ -98,6 +98,7 @@ type ContainerStartOptions struct { // about files to copy into a container type CopyToContainerOptions struct { AllowOverwriteDirWithFile bool + CopyUIDGID bool } // EventsOptions holds parameters to filter events with. @@ -160,9 +161,10 @@ type ImageBuildOptions struct { ShmSize int64 Dockerfile string Ulimits []*units.Ulimit - // See the parsing of buildArgs in api/server/router/build/build_routes.go - // for an explaination of why BuildArgs needs to use *string instead of - // just a string + // BuildArgs needs to be a *string instead of just a string so that + // we can tell the difference between "" (empty string) and no value + // at all (nil). 
See the parsing of buildArgs in + // api/server/router/build/build_routes.go for even more info. BuildArgs map[string]*string AuthConfigs map[string]AuthConfig Context io.Reader @@ -175,6 +177,10 @@ type ImageBuildOptions struct { // specified here do not need to have a valid parent chain to match cache. CacheFrom []string SecurityOpt []string + ExtraHosts []string // List of extra hosts + Target string + SessionID string + Platform string } // ImageBuildResponse holds information @@ -187,20 +193,22 @@ type ImageBuildResponse struct { // ImageCreateOptions holds information to create images. type ImageCreateOptions struct { - RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry + RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry. + Platform string // Platform is the target platform of the image if it needs to be pulled from the registry. } // ImageImportSource holds source information for ImageImport type ImageImportSource struct { - Source io.Reader // Source is the data to send to the server to create this image from (mutually exclusive with SourceName) - SourceName string // SourceName is the name of the image to pull (mutually exclusive with Source) + Source io.Reader // Source is the data to send to the server to create this image from. You must set SourceName to "-" to leverage this. + SourceName string // SourceName is the name of the image to pull. Set to "-" to leverage the Source attribute. } // ImageImportOptions holds information to import images from the client host. type ImageImportOptions struct { - Tag string // Tag is the name to tag this image with. This attribute is deprecated. - Message string // Message is the message to tag the image with - Changes []string // Changes are the raw changes to apply to this image + Tag string // Tag is the name to tag this image with. This attribute is deprecated. + Message string // Message is the message to tag the image with + Changes []string // Changes are the raw changes to apply to this image + Platform string // Platform is the target platform of the image } // ImageListOptions holds parameters to filter the list of images with. @@ -221,6 +229,7 @@ type ImagePullOptions struct { All bool RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry PrivilegeFunc RequestPrivilegeFunc + Platform string } // RequestPrivilegeFunc is a function interface that @@ -256,18 +265,6 @@ type ResizeOptions struct { Width uint } -// VersionResponse holds version information for the client and the server -type VersionResponse struct { - Client *Version - Server *Version -} - -// ServerOK returns true when the client could connect to the docker server -// and parse the information received. It returns false otherwise. -func (v VersionResponse) ServerOK() bool { - return v.Server != nil -} - // NodeListOptions holds parameters to list nodes with. type NodeListOptions struct { Filters filters.Args @@ -285,6 +282,12 @@ type ServiceCreateOptions struct { // // This field follows the format of the X-Registry-Auth header. EncodedRegistryAuth string + + // QueryRegistry indicates whether the service update requires + // contacting a registry. A registry may be contacted to retrieve + // the image digest and manifest, which in turn can be used to update + // platform or other information about the service. 
+ QueryRegistry bool } // ServiceCreateResponse contains the information returned to a client @@ -318,14 +321,32 @@ type ServiceUpdateOptions struct { // credentials if they are not given in EncodedRegistryAuth. Valid // values are "spec" and "previous-spec". RegistryAuthFrom string + + // Rollback indicates whether a server-side rollback should be + // performed. When this is set, the provided spec will be ignored. + // The valid values are "previous" and "none". An empty value is the + // same as "none". + Rollback string + + // QueryRegistry indicates whether the service update requires + // contacting a registry. A registry may be contacted to retrieve + // the image digest and manifest, which in turn can be used to update + // platform or other information about the service. + QueryRegistry bool } -// ServiceListOptions holds parameters to list services with. +// ServiceListOptions holds parameters to list services with. type ServiceListOptions struct { Filters filters.Args } -// TaskListOptions holds parameters to list tasks with. +// ServiceInspectOptions holds parameters related to the "service inspect" +// operation. +type ServiceInspectOptions struct { + InsertDefaults bool +} + +// TaskListOptions holds parameters to list tasks with. type TaskListOptions struct { Filters filters.Args } @@ -356,15 +377,6 @@ type PluginInstallOptions struct { Args []string } -// SecretRequestOption is a type for requesting secrets -type SecretRequestOption struct { - Source string - Target string - UID string - GID string - Mode os.FileMode -} - // SwarmUnlockKeyResponse contains the response for Engine API: // GET /swarm/unlockkey type SwarmUnlockKeyResponse struct { diff --git a/vendor/github.com/docker/docker/api/types/configs.go b/vendor/github.com/docker/docker/api/types/configs.go index 20c19f2..f6537a2 100644 --- a/vendor/github.com/docker/docker/api/types/configs.go +++ b/vendor/github.com/docker/docker/api/types/configs.go @@ -1,4 +1,4 @@ -package types +package types // import "github.com/docker/docker/api/types" import ( "github.com/docker/docker/api/types/container" @@ -25,19 +25,6 @@ type ContainerRmConfig struct { ForceRemove, RemoveVolume, RemoveLink bool } -// ContainerCommitConfig contains build configs for commit operation, -// and is used when making a commit with the current state of the container. -type ContainerCommitConfig struct { - Pause bool - Repo string - Tag string - Author string - Comment string - // merge container config into commit config before commit - MergeConfigs bool - Config *container.Config -} - // ExecConfig is a small subset of the Config struct that holds the configuration // for the exec feature of docker. type ExecConfig struct { @@ -50,6 +37,7 @@ type ExecConfig struct { Detach bool // Execute in detach mode DetachKeys string // Escape keys for detach Env []string // Environment variables + WorkingDir string // Working directory Cmd []string // Execution commands and args } diff --git a/vendor/github.com/docker/docker/api/types/container/config.go b/vendor/github.com/docker/docker/api/types/container/config.go index fc050e5..89ad08c 100644 --- a/vendor/github.com/docker/docker/api/types/container/config.go +++ b/vendor/github.com/docker/docker/api/types/container/config.go @@ -1,4 +1,4 @@ -package container +package container // import "github.com/docker/docker/api/types/container" import ( "time" @@ -7,6 +7,12 @@ import ( "github.com/docker/go-connections/nat" ) +// MinimumDuration puts a minimum on user configured duration. 
+// This is to prevent API error on time unit. For example, API may +// set 3 as healthcheck interval with intention of 3 seconds, but +// Docker interprets it as 3 nanoseconds. +const MinimumDuration = 1 * time.Millisecond + // HealthConfig holds configuration settings for the HEALTHCHECK feature. type HealthConfig struct { // Test is the test to perform to check that the container is healthy. @@ -19,8 +25,9 @@ type HealthConfig struct { Test []string `json:",omitempty"` // Zero means to inherit. Durations are expressed as integer nanoseconds. - Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. - Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. + Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. + Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. + StartPeriod time.Duration `json:",omitempty"` // The start period for the container to initialize before the retries starts to count down. // Retries is the number of consecutive failures needed to consider a container as unhealthy. // Zero means inherit. diff --git a/vendor/github.com/docker/docker/api/types/container/container_changes.go b/vendor/github.com/docker/docker/api/types/container/container_changes.go new file mode 100644 index 0000000..c909d6c --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/container_changes.go @@ -0,0 +1,21 @@ +package container + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/generate-swagger-api.sh +// ---------------------------------------------------------------------------- + +// ContainerChangeResponseItem change item in response to ContainerChanges operation +// swagger:model ContainerChangeResponseItem +type ContainerChangeResponseItem struct { + + // Kind of change + // Required: true + Kind uint8 `json:"Kind"` + + // Path to file that has changed + // Required: true + Path string `json:"Path"` +} diff --git a/vendor/github.com/docker/docker/api/types/container/container_create.go b/vendor/github.com/docker/docker/api/types/container/container_create.go index d028e3b..49efa0f 100644 --- a/vendor/github.com/docker/docker/api/types/container/container_create.go +++ b/vendor/github.com/docker/docker/api/types/container/container_create.go @@ -4,10 +4,10 @@ package container // DO NOT EDIT THIS FILE // This file was generated by `swagger generate operation` // -// See hack/swagger-gen.sh +// See hack/generate-swagger-api.sh // ---------------------------------------------------------------------------- -// ContainerCreateCreatedBody container create created body +// ContainerCreateCreatedBody OK response to ContainerCreate operation // swagger:model ContainerCreateCreatedBody type ContainerCreateCreatedBody struct { diff --git a/vendor/github.com/docker/docker/api/types/container/container_top.go b/vendor/github.com/docker/docker/api/types/container/container_top.go new file mode 100644 index 0000000..ba41edc --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/container_top.go @@ -0,0 +1,21 @@ +package container + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/generate-swagger-api.sh 
+// ----------------------------------------------------------------------------
+
+// ContainerTopOKBody OK response to ContainerTop operation
+// swagger:model ContainerTopOKBody
+type ContainerTopOKBody struct {
+
+	// Each process running in the container, where each process is an array of values corresponding to the titles
+	// Required: true
+	Processes [][]string `json:"Processes"`
+
+	// The ps column titles
+	// Required: true
+	Titles []string `json:"Titles"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/container/container_update.go b/vendor/github.com/docker/docker/api/types/container/container_update.go
index 81ee12c..7630ae5 100644
--- a/vendor/github.com/docker/docker/api/types/container/container_update.go
+++ b/vendor/github.com/docker/docker/api/types/container/container_update.go
@@ -4,10 +4,10 @@ package container
 // DO NOT EDIT THIS FILE
 // This file was generated by `swagger generate operation`
 //
-// See hack/swagger-gen.sh
+// See hack/generate-swagger-api.sh
 // ----------------------------------------------------------------------------

-// ContainerUpdateOKBody container update o k body
+// ContainerUpdateOKBody OK response to ContainerUpdate operation
 // swagger:model ContainerUpdateOKBody
 type ContainerUpdateOKBody struct {
diff --git a/vendor/github.com/docker/docker/api/types/container/container_wait.go b/vendor/github.com/docker/docker/api/types/container/container_wait.go
index 16cf335..9e3910a 100644
--- a/vendor/github.com/docker/docker/api/types/container/container_wait.go
+++ b/vendor/github.com/docker/docker/api/types/container/container_wait.go
@@ -4,13 +4,25 @@ package container
 // DO NOT EDIT THIS FILE
 // This file was generated by `swagger generate operation`
 //
-// See hack/swagger-gen.sh
+// See hack/generate-swagger-api.sh
 // ----------------------------------------------------------------------------

-// ContainerWaitOKBody container wait o k body
+// ContainerWaitOKBodyError container waiting error, if any
+// swagger:model ContainerWaitOKBodyError
+type ContainerWaitOKBodyError struct {
+
+	// Details of an error
+	Message string `json:"Message,omitempty"`
+}
+
+// ContainerWaitOKBody OK response to ContainerWait operation
 // swagger:model ContainerWaitOKBody
 type ContainerWaitOKBody struct {

+	// error
+	// Required: true
+	Error *ContainerWaitOKBodyError `json:"Error"`
+
 	// Exit code of the container
 	// Required: true
 	StatusCode int64 `json:"StatusCode"`
diff --git a/vendor/github.com/docker/docker/api/types/container/host_config.go b/vendor/github.com/docker/docker/api/types/container/host_config.go
index 0c82d62..02271ec 100644
--- a/vendor/github.com/docker/docker/api/types/container/host_config.go
+++ b/vendor/github.com/docker/docker/api/types/container/host_config.go
@@ -1,4 +1,4 @@
-package container
+package container // import "github.com/docker/docker/api/types/container"

 import (
 	"strings"
@@ -10,9 +10,6 @@ import (
 	"github.com/docker/go-units"
 )

-// NetworkMode represents the container network stack.
-type NetworkMode string
-
 // Isolation represents the isolation technology of a container.
The supported // values are platform specific type Isolation string @@ -23,42 +20,101 @@ func (i Isolation) IsDefault() bool { return strings.ToLower(string(i)) == "default" || string(i) == "" } +// IsHyperV indicates the use of a Hyper-V partition for isolation +func (i Isolation) IsHyperV() bool { + return strings.ToLower(string(i)) == "hyperv" +} + +// IsProcess indicates the use of process isolation +func (i Isolation) IsProcess() bool { + return strings.ToLower(string(i)) == "process" +} + +const ( + // IsolationEmpty is unspecified (same behavior as default) + IsolationEmpty = Isolation("") + // IsolationDefault is the default isolation mode on current daemon + IsolationDefault = Isolation("default") + // IsolationProcess is process isolation mode + IsolationProcess = Isolation("process") + // IsolationHyperV is HyperV isolation mode + IsolationHyperV = Isolation("hyperv") +) + // IpcMode represents the container ipc stack. type IpcMode string -// IsPrivate indicates whether the container uses its private ipc stack. +// IsPrivate indicates whether the container uses its own private ipc namespace which can not be shared. func (n IpcMode) IsPrivate() bool { - return !(n.IsHost() || n.IsContainer()) + return n == "private" } -// IsHost indicates whether the container uses the host's ipc stack. +// IsHost indicates whether the container shares the host's ipc namespace. func (n IpcMode) IsHost() bool { return n == "host" } -// IsContainer indicates whether the container uses a container's ipc stack. +// IsShareable indicates whether the container's ipc namespace can be shared with another container. +func (n IpcMode) IsShareable() bool { + return n == "shareable" +} + +// IsContainer indicates whether the container uses another container's ipc namespace. func (n IpcMode) IsContainer() bool { parts := strings.SplitN(string(n), ":", 2) return len(parts) > 1 && parts[0] == "container" } -// Valid indicates whether the ipc stack is valid. +// IsNone indicates whether container IpcMode is set to "none". +func (n IpcMode) IsNone() bool { + return n == "none" +} + +// IsEmpty indicates whether container IpcMode is empty +func (n IpcMode) IsEmpty() bool { + return n == "" +} + +// Valid indicates whether the ipc mode is valid. func (n IpcMode) Valid() bool { - parts := strings.Split(string(n), ":") - switch mode := parts[0]; mode { - case "", "host": - case "container": - if len(parts) != 2 || parts[1] == "" { - return false - } - default: - return false - } - return true + return n.IsEmpty() || n.IsNone() || n.IsPrivate() || n.IsHost() || n.IsShareable() || n.IsContainer() } // Container returns the name of the container ipc stack is going to be used. func (n IpcMode) Container() string { + parts := strings.SplitN(string(n), ":", 2) + if len(parts) > 1 && parts[0] == "container" { + return parts[1] + } + return "" +} + +// NetworkMode represents the container network stack. +type NetworkMode string + +// IsNone indicates whether container isn't using a network stack. +func (n NetworkMode) IsNone() bool { + return n == "none" +} + +// IsDefault indicates whether container uses the default network stack. +func (n NetworkMode) IsDefault() bool { + return n == "default" +} + +// IsPrivate indicates whether container uses its private network stack. +func (n NetworkMode) IsPrivate() bool { + return !(n.IsHost() || n.IsContainer()) +} + +// IsContainer indicates whether container uses a container network stack. 
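+// For example, NetworkMode("container:web") reuses the network stack of the
+// container named "web" ("web" here is an illustrative container name).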
+func (n NetworkMode) IsContainer() bool { + parts := strings.SplitN(string(n), ":", 2) + return len(parts) > 1 && parts[0] == "container" +} + +// ConnectedContainer is the id of the container which network this container is connected to. +func (n NetworkMode) ConnectedContainer() string { parts := strings.SplitN(string(n), ":", 2) if len(parts) > 1 { return parts[1] @@ -66,6 +122,14 @@ func (n IpcMode) Container() string { return "" } +//UserDefined indicates user-created network +func (n NetworkMode) UserDefined() string { + if n.IsUserDefined() { + return string(n) + } + return "" +} + // UsernsMode represents userns mode in the container. type UsernsMode string @@ -223,6 +287,17 @@ func (rp *RestartPolicy) IsSame(tp *RestartPolicy) bool { return rp.Name == tp.Name && rp.MaximumRetryCount == tp.MaximumRetryCount } +// LogMode is a type to define the available modes for logging +// These modes affect how logs are handled when log messages start piling up. +type LogMode string + +// Available logging modes +const ( + LogModeUnset = "" + LogModeBlocking LogMode = "blocking" + LogModeNonBlock LogMode = "non-blocking" +) + // LogConfig represents the logging configuration of the container. type LogConfig struct { Type string @@ -251,6 +326,7 @@ type Resources struct { CpusetCpus string // CpusetCpus 0-2, 0,1 CpusetMems string // CpusetMems 0-2, 0,1 Devices []DeviceMapping // List of devices to map inside the container + DeviceCgroupRules []string // List of rule to be added to the device cgroup DiskQuota int64 // Disk limit (in bytes) KernelMemory int64 // Kernel memory limit (in bytes) MemoryReservation int64 // Memory soft limit (in bytes) @@ -317,7 +393,7 @@ type HostConfig struct { // Applicable to Windows ConsoleSize [2]uint // Initial console size (height,width) - Isolation Isolation // Isolation technology of the container (eg default, hyperv) + Isolation Isolation // Isolation technology of the container (e.g. default, hyperv) // Contains container's resources (cgroups, ulimits) Resources @@ -327,7 +403,4 @@ type HostConfig struct { // Run a custom init inside the container, if null, use the daemon's configured settings Init *bool `json:",omitempty"` - - // Custom init path - InitPath string `json:",omitempty"` } diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go b/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go index 9fb79be..cf6fdf4 100644 --- a/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go +++ b/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go @@ -1,24 +1,12 @@ // +build !windows -package container - -import "strings" +package container // import "github.com/docker/docker/api/types/container" // IsValid indicates if an isolation technology is valid func (i Isolation) IsValid() bool { return i.IsDefault() } -// IsPrivate indicates whether container uses its private network stack. -func (n NetworkMode) IsPrivate() bool { - return !(n.IsHost() || n.IsContainer()) -} - -// IsDefault indicates whether container uses the default network stack. -func (n NetworkMode) IsDefault() bool { - return n == "default" -} - // NetworkName returns the name of the network stack. func (n NetworkMode) NetworkName() string { if n.IsBridge() { @@ -47,35 +35,7 @@ func (n NetworkMode) IsHost() bool { return n == "host" } -// IsContainer indicates whether container uses a container network stack. 
-func (n NetworkMode) IsContainer() bool { - parts := strings.SplitN(string(n), ":", 2) - return len(parts) > 1 && parts[0] == "container" -} - -// IsNone indicates whether container isn't using a network stack. -func (n NetworkMode) IsNone() bool { - return n == "none" -} - -// ConnectedContainer is the id of the container which network this container is connected to. -func (n NetworkMode) ConnectedContainer() string { - parts := strings.SplitN(string(n), ":", 2) - if len(parts) > 1 { - return parts[1] - } - return "" -} - // IsUserDefined indicates user-created network func (n NetworkMode) IsUserDefined() bool { return !n.IsDefault() && !n.IsBridge() && !n.IsHost() && !n.IsNone() && !n.IsContainer() } - -//UserDefined indicates user-created network -func (n NetworkMode) UserDefined() string { - if n.IsUserDefined() { - return string(n) - } - return "" -} diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go b/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go index 0ee332b..99f803a 100644 --- a/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go +++ b/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go @@ -1,24 +1,4 @@ -package container - -import ( - "strings" -) - -// IsDefault indicates whether container uses the default network stack. -func (n NetworkMode) IsDefault() bool { - return n == "default" -} - -// IsNone indicates whether container isn't using a network stack. -func (n NetworkMode) IsNone() bool { - return n == "none" -} - -// IsContainer indicates whether container uses a container network stack. -// Returns false as windows doesn't support this mode -func (n NetworkMode) IsContainer() bool { - return false -} +package container // import "github.com/docker/docker/api/types/container" // IsBridge indicates whether container uses the bridge network stack // in windows it is given the name NAT @@ -32,30 +12,9 @@ func (n NetworkMode) IsHost() bool { return false } -// IsPrivate indicates whether container uses its private network stack. -func (n NetworkMode) IsPrivate() bool { - return !(n.IsHost() || n.IsContainer()) -} - -// ConnectedContainer is the id of the container which network this container is connected to. 
-// Returns blank string on windows -func (n NetworkMode) ConnectedContainer() string { - return "" -} - // IsUserDefined indicates user-created network func (n NetworkMode) IsUserDefined() bool { - return !n.IsDefault() && !n.IsNone() && !n.IsBridge() -} - -// IsHyperV indicates the use of a Hyper-V partition for isolation -func (i Isolation) IsHyperV() bool { - return strings.ToLower(string(i)) == "hyperv" -} - -// IsProcess indicates the use of process isolation -func (i Isolation) IsProcess() bool { - return strings.ToLower(string(i)) == "process" + return !n.IsDefault() && !n.IsNone() && !n.IsBridge() && !n.IsContainer() } // IsValid indicates if an isolation technology is valid @@ -71,17 +30,11 @@ func (n NetworkMode) NetworkName() string { return "nat" } else if n.IsNone() { return "none" + } else if n.IsContainer() { + return "container" } else if n.IsUserDefined() { return n.UserDefined() } return "" } - -//UserDefined indicates user-created network -func (n NetworkMode) UserDefined() string { - if n.IsUserDefined() { - return string(n) - } - return "" -} diff --git a/vendor/github.com/docker/docker/api/types/container/waitcondition.go b/vendor/github.com/docker/docker/api/types/container/waitcondition.go new file mode 100644 index 0000000..cd8311f --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/waitcondition.go @@ -0,0 +1,22 @@ +package container // import "github.com/docker/docker/api/types/container" + +// WaitCondition is a type used to specify a container state for which +// to wait. +type WaitCondition string + +// Possible WaitCondition Values. +// +// WaitConditionNotRunning (default) is used to wait for any of the non-running +// states: "created", "exited", "dead", "removing", or "removed". +// +// WaitConditionNextExit is used to wait for the next time the state changes +// to a non-running state. If the state is currently "created" or "exited", +// this would cause Wait() to block until either the container runs and exits +// or is removed. +// +// WaitConditionRemoved is used to wait for the container to be removed. +const ( + WaitConditionNotRunning WaitCondition = "not-running" + WaitConditionNextExit WaitCondition = "next-exit" + WaitConditionRemoved WaitCondition = "removed" +) diff --git a/vendor/github.com/docker/docker/api/types/events/events.go b/vendor/github.com/docker/docker/api/types/events/events.go index 7129a65..027c6ed 100644 --- a/vendor/github.com/docker/docker/api/types/events/events.go +++ b/vendor/github.com/docker/docker/api/types/events/events.go @@ -1,4 +1,4 @@ -package events +package events // import "github.com/docker/docker/api/types/events" const ( // ContainerEventType is the event type that containers generate @@ -13,6 +13,14 @@ const ( PluginEventType = "plugin" // VolumeEventType is the event type that volumes generate VolumeEventType = "volume" + // ServiceEventType is the event type that services generate + ServiceEventType = "service" + // NodeEventType is the event type that nodes generate + NodeEventType = "node" + // SecretEventType is the event type that secrets generate + SecretEventType = "secret" + // ConfigEventType is the event type that configs generate + ConfigEventType = "config" ) // Actor describes something that generates events, @@ -36,6 +44,8 @@ type Message struct { Type string Action string Actor Actor + // Engine events are local scope. Cluster events are swarm scope. 
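+	// Typical values are "local" (engine events) and "swarm" (cluster events).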
+ Scope string `json:"scope,omitempty"` Time int64 `json:"time,omitempty"` TimeNano int64 `json:"timeNano,omitempty"` diff --git a/vendor/github.com/docker/docker/api/types/filters/parse.go b/vendor/github.com/docker/docker/api/types/filters/parse.go index e01a41d..a41e3d8 100644 --- a/vendor/github.com/docker/docker/api/types/filters/parse.go +++ b/vendor/github.com/docker/docker/api/types/filters/parse.go @@ -1,38 +1,45 @@ -// Package filters provides helper function to parse and handle command line -// filter, used for example in docker ps or docker images commands. -package filters +/*Package filters provides tools for encoding a mapping of keys to a set of +multiple values. +*/ +package filters // import "github.com/docker/docker/api/types/filters" import ( "encoding/json" "errors" - "fmt" "regexp" "strings" "github.com/docker/docker/api/types/versions" ) -// Args stores filter arguments as map key:{map key: bool}. -// It contains an aggregation of the map of arguments (which are in the form -// of -f 'key=value') based on the key, and stores values for the same key -// in a map with string keys and boolean values. -// e.g given -f 'label=label1=1' -f 'label=label2=2' -f 'image.name=ubuntu' -// the args will be {"image.name":{"ubuntu":true},"label":{"label1=1":true,"label2=2":true}} +// Args stores a mapping of keys to a set of multiple values. type Args struct { fields map[string]map[string]bool } -// NewArgs initializes a new Args struct. -func NewArgs() Args { - return Args{fields: map[string]map[string]bool{}} +// KeyValuePair are used to initialize a new Args +type KeyValuePair struct { + Key string + Value string } -// ParseFlag parses the argument to the filter flag. Like -// -// `docker ps -f 'created=today' -f 'image.name=ubuntu*'` +// Arg creates a new KeyValuePair for initializing Args +func Arg(key, value string) KeyValuePair { + return KeyValuePair{Key: key, Value: value} +} + +// NewArgs returns a new Args populated with the initial args +func NewArgs(initialArgs ...KeyValuePair) Args { + args := Args{fields: map[string]map[string]bool{}} + for _, arg := range initialArgs { + args.Add(arg.Key, arg.Value) + } + return args +} + +// ParseFlag parses a key=value string and adds it to an Args. // -// If prev map is provided, then it is appended to, and returned. By default a new -// map is created. +// Deprecated: Use Args.Add() func ParseFlag(arg string, prev Args) (Args, error) { filters := prev if len(arg) == 0 { @@ -53,74 +60,95 @@ func ParseFlag(arg string, prev Args) (Args, error) { return filters, nil } -// ErrBadFormat is an error returned in case of bad format for a filter. +// ErrBadFormat is an error returned when a filter is not in the form key=value +// +// Deprecated: this error will be removed in a future version var ErrBadFormat = errors.New("bad format of filter (expected name=value)") -// ToParam packs the Args into a string for easy transport from client to server. 
+// ToParam encodes the Args as args JSON encoded string +// +// Deprecated: use ToJSON func ToParam(a Args) (string, error) { - // this way we don't URL encode {}, just empty space - if a.Len() == 0 { - return "", nil + return ToJSON(a) +} + +// MarshalJSON returns a JSON byte representation of the Args +func (args Args) MarshalJSON() ([]byte, error) { + if len(args.fields) == 0 { + return []byte{}, nil } + return json.Marshal(args.fields) +} - buf, err := json.Marshal(a.fields) - if err != nil { - return "", err +// ToJSON returns the Args as a JSON encoded string +func ToJSON(a Args) (string, error) { + if a.Len() == 0 { + return "", nil } - return string(buf), nil + buf, err := json.Marshal(a) + return string(buf), err } -// ToParamWithVersion packs the Args into a string for easy transport from client to server. -// The generated string will depend on the specified version (corresponding to the API version). +// ToParamWithVersion encodes Args as a JSON string. If version is less than 1.22 +// then the encoded format will use an older legacy format where the values are a +// list of strings, instead of a set. +// +// Deprecated: Use ToJSON func ToParamWithVersion(version string, a Args) (string, error) { - // this way we don't URL encode {}, just empty space if a.Len() == 0 { return "", nil } - // for daemons older than v1.10, filter must be of the form map[string][]string - buf := []byte{} - err := errors.New("") if version != "" && versions.LessThan(version, "1.22") { - buf, err = json.Marshal(convertArgsToSlice(a.fields)) - } else { - buf, err = json.Marshal(a.fields) - } - if err != nil { - return "", err + buf, err := json.Marshal(convertArgsToSlice(a.fields)) + return string(buf), err } - return string(buf), nil + + return ToJSON(a) } -// FromParam unpacks the filter Args. +// FromParam decodes a JSON encoded string into Args +// +// Deprecated: use FromJSON func FromParam(p string) (Args, error) { - if len(p) == 0 { - return NewArgs(), nil + return FromJSON(p) +} + +// FromJSON decodes a JSON encoded string into Args +func FromJSON(p string) (Args, error) { + args := NewArgs() + + if p == "" { + return args, nil } - r := strings.NewReader(p) - d := json.NewDecoder(r) + raw := []byte(p) + err := json.Unmarshal(raw, &args) + if err == nil { + return args, nil + } - m := map[string]map[string]bool{} - if err := d.Decode(&m); err != nil { - r.Seek(0, 0) - - // Allow parsing old arguments in slice format. - // Because other libraries might be sending them in this format. - deprecated := map[string][]string{} - if deprecatedErr := d.Decode(&deprecated); deprecatedErr == nil { - m = deprecatedArgs(deprecated) - } else { - return NewArgs(), err - } + // Fallback to parsing arguments in the legacy slice format + deprecated := map[string][]string{} + if legacyErr := json.Unmarshal(raw, &deprecated); legacyErr != nil { + return args, err + } + + args.fields = deprecatedArgs(deprecated) + return args, nil +} + +// UnmarshalJSON populates the Args from JSON encode bytes +func (args Args) UnmarshalJSON(raw []byte) error { + if len(raw) == 0 { + return nil } - return Args{m}, nil + return json.Unmarshal(raw, &args.fields) } -// Get returns the list of values associates with a field. -// It returns a slice of strings to keep backwards compatibility with old code. 
-func (filters Args) Get(field string) []string { - values := filters.fields[field] +// Get returns the list of values associated with the key +func (args Args) Get(key string) []string { + values := args.fields[key] if values == nil { return make([]string, 0) } @@ -131,37 +159,34 @@ func (filters Args) Get(field string) []string { return slice } -// Add adds a new value to a filter field. -func (filters Args) Add(name, value string) { - if _, ok := filters.fields[name]; ok { - filters.fields[name][value] = true +// Add a new value to the set of values +func (args Args) Add(key, value string) { + if _, ok := args.fields[key]; ok { + args.fields[key][value] = true } else { - filters.fields[name] = map[string]bool{value: true} + args.fields[key] = map[string]bool{value: true} } } -// Del removes a value from a filter field. -func (filters Args) Del(name, value string) { - if _, ok := filters.fields[name]; ok { - delete(filters.fields[name], value) - if len(filters.fields[name]) == 0 { - delete(filters.fields, name) +// Del removes a value from the set +func (args Args) Del(key, value string) { + if _, ok := args.fields[key]; ok { + delete(args.fields[key], value) + if len(args.fields[key]) == 0 { + delete(args.fields, key) } } } -// Len returns the number of fields in the arguments. -func (filters Args) Len() int { - return len(filters.fields) +// Len returns the number of keys in the mapping +func (args Args) Len() int { + return len(args.fields) } -// MatchKVList returns true if the values for the specified field matches the ones -// from the sources. -// e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name', {'ubuntu'}}, -// field is 'label' and sources are {'label1': '1', 'label2': '2'} -// it returns true. -func (filters Args) MatchKVList(field string, sources map[string]string) bool { - fieldValues := filters.fields[field] +// MatchKVList returns true if all the pairs in sources exist as key=value +// pairs in the mapping at key, or if there are no values at key. +func (args Args) MatchKVList(key string, sources map[string]string) bool { + fieldValues := args.fields[key] //do not filter if there is no filter set or cannot determine filter if len(fieldValues) == 0 { @@ -172,8 +197,8 @@ func (filters Args) MatchKVList(field string, sources map[string]string) bool { return false } - for name2match := range fieldValues { - testKV := strings.SplitN(name2match, "=", 2) + for value := range fieldValues { + testKV := strings.SplitN(value, "=", 2) v, ok := sources[testKV[0]] if !ok { @@ -187,16 +212,13 @@ func (filters Args) MatchKVList(field string, sources map[string]string) bool { return true } -// Match returns true if the values for the specified field matches the source string -// e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name', {'ubuntu'}}, -// field is 'image.name' and source is 'ubuntu' -// it returns true. -func (filters Args) Match(field, source string) bool { - if filters.ExactMatch(field, source) { +// Match returns true if any of the values at key match the source string +func (args Args) Match(field, source string) bool { + if args.ExactMatch(field, source) { return true } - fieldValues := filters.fields[field] + fieldValues := args.fields[field] for name2match := range fieldValues { match, err := regexp.MatchString(name2match, source) if err != nil { @@ -209,9 +231,9 @@ func (filters Args) Match(field, source string) bool { return false } -// ExactMatch returns true if the source matches exactly one of the filters. 
-func (filters Args) ExactMatch(field, source string) bool {
-	fieldValues, ok := filters.fields[field]
+// ExactMatch returns true if the source matches exactly one of the values.
+func (args Args) ExactMatch(key, source string) bool {
+	fieldValues, ok := args.fields[key]
 	//do not filter if there is no filter set or cannot determine filter
 	if !ok || len(fieldValues) == 0 {
 		return true
@@ -221,14 +243,15 @@
 	return fieldValues[source]
 }

-// UniqueExactMatch returns true if there is only one filter and the source matches exactly this one.
-func (filters Args) UniqueExactMatch(field, source string) bool {
-	fieldValues := filters.fields[field]
+// UniqueExactMatch returns true if there is only one value and the source
+// matches exactly the value.
+func (args Args) UniqueExactMatch(key, source string) bool {
+	fieldValues := args.fields[key]
 	//do not filter if there is no filter set or cannot determine filter
 	if len(fieldValues) == 0 {
 		return true
 	}
-	if len(filters.fields[field]) != 1 {
+	if len(args.fields[key]) != 1 {
 		return false
 	}
@@ -236,14 +259,14 @@
 	return fieldValues[source]
 }

-// FuzzyMatch returns true if the source matches exactly one of the filters,
-// or the source has one of the filters as a prefix.
-func (filters Args) FuzzyMatch(field, source string) bool {
-	if filters.ExactMatch(field, source) {
+// FuzzyMatch returns true if the source matches exactly one value, or the
+// source has one of the values as a prefix.
+func (args Args) FuzzyMatch(key, source string) bool {
+	if args.ExactMatch(key, source) {
 		return true
 	}
-	fieldValues := filters.fields[field]
+	fieldValues := args.fields[key]
 	for prefix := range fieldValues {
 		if strings.HasPrefix(source, prefix) {
 			return true
@@ -252,30 +275,47 @@
 	return false
 }

-// Include returns true if the name of the field to filter is in the filters.
-func (filters Args) Include(field string) bool {
-	_, ok := filters.fields[field]
+// Include returns true if the key exists in the mapping
+//
+// Deprecated: use Contains
+func (args Args) Include(field string) bool {
+	_, ok := args.fields[field]
+	return ok
+}
+
+// Contains returns true if the key exists in the mapping
+func (args Args) Contains(field string) bool {
+	_, ok := args.fields[field]
 	return ok
 }

-// Validate ensures that all the fields in the filter are valid.
-// It returns an error as soon as it finds an invalid field.
-func (filters Args) Validate(accepted map[string]bool) error {
-	for name := range filters.fields {
+type invalidFilter string
+
+func (e invalidFilter) Error() string {
+	return "Invalid filter '" + string(e) + "'"
+}
+
+func (invalidFilter) InvalidParameter() {}
+
+// Validate compares the set of accepted keys against the keys in the mapping.
+// An error is returned if any mapping keys are not in the accepted set.
+func (args Args) Validate(accepted map[string]bool) error {
+	for name := range args.fields {
 		if !accepted[name] {
-			return fmt.Errorf("Invalid filter '%s'", name)
+			return invalidFilter(name)
 		}
 	}
 	return nil
 }

-// WalkValues iterates over the list of filtered values for a field.
-// It stops the iteration if it finds an error and it returns that error.
-func (filters Args) WalkValues(field string, op func(value string) error) error { - if _, ok := filters.fields[field]; !ok { +// WalkValues iterates over the list of values for a key in the mapping and calls +// op() for each value. If op returns an error the iteration stops and the +// error is returned. +func (args Args) WalkValues(field string, op func(value string) error) error { + if _, ok := args.fields[field]; !ok { return nil } - for v := range filters.fields[field] { + for v := range args.fields[field] { if err := op(v); err != nil { return err } diff --git a/vendor/github.com/docker/docker/api/types/graph_driver_data.go b/vendor/github.com/docker/docker/api/types/graph_driver_data.go new file mode 100644 index 0000000..4d9bf1c --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/graph_driver_data.go @@ -0,0 +1,17 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// GraphDriverData Information about a container's graph driver. +// swagger:model GraphDriverData +type GraphDriverData struct { + + // data + // Required: true + Data map[string]string `json:"Data"` + + // name + // Required: true + Name string `json:"Name"` +} diff --git a/vendor/github.com/docker/docker/api/types/image/image_history.go b/vendor/github.com/docker/docker/api/types/image/image_history.go new file mode 100644 index 0000000..d6b354b --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/image/image_history.go @@ -0,0 +1,37 @@ +package image + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/generate-swagger-api.sh +// ---------------------------------------------------------------------------- + +// HistoryResponseItem individual image layer information in response to ImageHistory operation +// swagger:model HistoryResponseItem +type HistoryResponseItem struct { + + // comment + // Required: true + Comment string `json:"Comment"` + + // created + // Required: true + Created int64 `json:"Created"` + + // created by + // Required: true + CreatedBy string `json:"CreatedBy"` + + // Id + // Required: true + ID string `json:"Id"` + + // size + // Required: true + Size int64 `json:"Size"` + + // tags + // Required: true + Tags []string `json:"Tags"` +} diff --git a/vendor/github.com/docker/docker/api/types/image_delete_response_item.go b/vendor/github.com/docker/docker/api/types/image_delete_response_item.go new file mode 100644 index 0000000..b9a65a0 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/image_delete_response_item.go @@ -0,0 +1,15 @@ +package types + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +// ImageDeleteResponseItem image delete response item +// swagger:model ImageDeleteResponseItem +type ImageDeleteResponseItem struct { + + // The image ID of an image that was deleted + Deleted string `json:"Deleted,omitempty"` + + // The image ID of an image that was untagged + Untagged string `json:"Untagged,omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/mount/mount.go b/vendor/github.com/docker/docker/api/types/mount/mount.go index 31f2365..3fef974 100644 --- a/vendor/github.com/docker/docker/api/types/mount/mount.go +++ b/vendor/github.com/docker/docker/api/types/mount/mount.go @@ -1,4 +1,4 @@ -package mount +package mount // import "github.com/docker/docker/api/types/mount" import ( "os" @@ -15,6 +15,8 @@ const ( TypeVolume Type = "volume" // TypeTmpfs is the type for mounting tmpfs TypeTmpfs Type = "tmpfs" + // TypeNamedPipe is the type for mounting Windows named pipes + TypeNamedPipe Type = "npipe" ) // Mount represents a mount (volume). @@ -23,9 +25,10 @@ type Mount struct { // Source specifies the name of the mount. Depending on mount type, this // may be a volume name or a host path, or even ignored. // Source is not supported for tmpfs (must be an empty value) - Source string `json:",omitempty"` - Target string `json:",omitempty"` - ReadOnly bool `json:",omitempty"` + Source string `json:",omitempty"` + Target string `json:",omitempty"` + ReadOnly bool `json:",omitempty"` + Consistency Consistency `json:",omitempty"` BindOptions *BindOptions `json:",omitempty"` VolumeOptions *VolumeOptions `json:",omitempty"` @@ -60,6 +63,20 @@ var Propagations = []Propagation{ PropagationSlave, } +// Consistency represents the consistency requirements of a mount. +type Consistency string + +const ( + // ConsistencyFull guarantees bind mount-like consistency + ConsistencyFull Consistency = "consistent" + // ConsistencyCached mounts can cache read data and FS structure + ConsistencyCached Consistency = "cached" + // ConsistencyDelegated mounts can cache read and written data and structure + ConsistencyDelegated Consistency = "delegated" + // ConsistencyDefault provides "consistent" behavior unless overridden + ConsistencyDefault Consistency = "default" +) + // BindOptions defines options specific to mounts of type "bind". type BindOptions struct { Propagation Propagation `json:",omitempty"` @@ -83,7 +100,7 @@ type TmpfsOptions struct { // Size sets the size of the tmpfs, in bytes. // // This will be converted to an operating system specific value - // depending on the host. For example, on linux, it will be convered to + // depending on the host. For example, on linux, it will be converted to // use a 'k', 'm' or 'g' syntax. BSD, though not widely supported with // docker, uses a straight byte value. 
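	// For example, Size: 64 * 1024 * 1024 would request a 64 MiB tmpfs
	// (an illustrative value; any byte count works).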
// diff --git a/vendor/github.com/docker/docker/api/types/network/network.go b/vendor/github.com/docker/docker/api/types/network/network.go index 832b3ed..761d0b3 100644 --- a/vendor/github.com/docker/docker/api/types/network/network.go +++ b/vendor/github.com/docker/docker/api/types/network/network.go @@ -1,4 +1,4 @@ -package network +package network // import "github.com/docker/docker/api/types/network" // Address represents an IP address type Address struct { @@ -28,7 +28,15 @@ type EndpointIPAMConfig struct { LinkLocalIPs []string `json:",omitempty"` } -// PeerInfo represents one peer of a overlay network +// Copy makes a copy of the endpoint ipam config +func (cfg *EndpointIPAMConfig) Copy() *EndpointIPAMConfig { + cfgCopy := *cfg + cfgCopy.LinkLocalIPs = make([]string, 0, len(cfg.LinkLocalIPs)) + cfgCopy.LinkLocalIPs = append(cfgCopy.LinkLocalIPs, cfg.LinkLocalIPs...) + return &cfgCopy +} + +// PeerInfo represents one peer of an overlay network type PeerInfo struct { Name string IP string @@ -50,6 +58,42 @@ type EndpointSettings struct { GlobalIPv6Address string GlobalIPv6PrefixLen int MacAddress string + DriverOpts map[string]string +} + +// Task carries the information about one backend task +type Task struct { + Name string + EndpointID string + EndpointIP string + Info map[string]string +} + +// ServiceInfo represents service parameters with the list of service's tasks +type ServiceInfo struct { + VIP string + Ports []string + LocalLBIndex int + Tasks []Task +} + +// Copy makes a deep copy of `EndpointSettings` +func (es *EndpointSettings) Copy() *EndpointSettings { + epCopy := *es + if es.IPAMConfig != nil { + epCopy.IPAMConfig = es.IPAMConfig.Copy() + } + + if es.Links != nil { + links := make([]string, 0, len(es.Links)) + epCopy.Links = append(links, es.Links...) + } + + if es.Aliases != nil { + aliases := make([]string, 0, len(es.Aliases)) + epCopy.Aliases = append(aliases, es.Aliases...) + } + return &epCopy } // NetworkingConfig represents the container's networking configuration for each of its interfaces @@ -57,3 +101,8 @@ type EndpointSettings struct { type NetworkingConfig struct { EndpointsConfig map[string]*EndpointSettings // Endpoint configs for each connecting network } + +// ConfigReference specifies the source which provides a network's configuration +type ConfigReference struct { + Network string +} diff --git a/vendor/github.com/docker/docker/api/types/plugin.go b/vendor/github.com/docker/docker/api/types/plugin.go index 6cc7a23..cab333e 100644 --- a/vendor/github.com/docker/docker/api/types/plugin.go +++ b/vendor/github.com/docker/docker/api/types/plugin.go @@ -11,7 +11,7 @@ type Plugin struct { // Required: true Config PluginConfig `json:"Config"` - // True when the plugin is running. False when the plugin is not running, only installed. + // True if the plugin is running. False if the plugin is not running, only installed. 
// Required: true Enabled bool `json:"Enabled"` @@ -42,6 +42,9 @@ type PluginConfig struct { // Required: true Description string `json:"Description"` + // Docker Version used to create the plugin + DockerVersion string `json:"DockerVersion,omitempty"` + // documentation // Required: true Documentation string `json:"Documentation"` @@ -58,6 +61,10 @@ type PluginConfig struct { // Required: true Interface PluginConfigInterface `json:"Interface"` + // ipc host + // Required: true + IpcHost bool `json:"IpcHost"` + // linux // Required: true Linux PluginConfigLinux `json:"Linux"` @@ -70,6 +77,10 @@ type PluginConfig struct { // Required: true Network PluginConfigNetwork `json:"Network"` + // pid host + // Required: true + PidHost bool `json:"PidHost"` + // propagated mount // Required: true PropagatedMount string `json:"PropagatedMount"` diff --git a/vendor/github.com/docker/docker/api/types/plugin_responses.go b/vendor/github.com/docker/docker/api/types/plugin_responses.go index d6f7553..60d1fb5 100644 --- a/vendor/github.com/docker/docker/api/types/plugin_responses.go +++ b/vendor/github.com/docker/docker/api/types/plugin_responses.go @@ -1,21 +1,14 @@ -package types +package types // import "github.com/docker/docker/api/types" import ( "encoding/json" "fmt" + "sort" ) // PluginsListResponse contains the response for the Engine API type PluginsListResponse []*Plugin -const ( - authzDriver = "AuthzDriver" - graphDriver = "GraphDriver" - ipamDriver = "IpamDriver" - networkDriver = "NetworkDriver" - volumeDriver = "VolumeDriver" -) - // UnmarshalJSON implements json.Unmarshaler for PluginInterfaceType func (t *PluginInterfaceType) UnmarshalJSON(p []byte) error { versionIndex := len(p) @@ -62,3 +55,17 @@ type PluginPrivilege struct { // PluginPrivileges is a list of PluginPrivilege type PluginPrivileges []PluginPrivilege + +func (s PluginPrivileges) Len() int { + return len(s) +} + +func (s PluginPrivileges) Less(i, j int) bool { + return s[i].Name < s[j].Name +} + +func (s PluginPrivileges) Swap(i, j int) { + sort.Strings(s[i].Value) + sort.Strings(s[j].Value) + s[i], s[j] = s[j], s[i] +} diff --git a/vendor/github.com/docker/docker/api/types/port.go b/vendor/github.com/docker/docker/api/types/port.go index ad52d46..d912347 100644 --- a/vendor/github.com/docker/docker/api/types/port.go +++ b/vendor/github.com/docker/docker/api/types/port.go @@ -7,7 +7,7 @@ package types // swagger:model Port type Port struct { - // IP + // Host IP address that the container's port is mapped to IP string `json:"IP,omitempty"` // Port on the container diff --git a/vendor/github.com/docker/docker/api/types/reference/image_reference.go b/vendor/github.com/docker/docker/api/types/reference/image_reference.go deleted file mode 100644 index be9cf8e..0000000 --- a/vendor/github.com/docker/docker/api/types/reference/image_reference.go +++ /dev/null @@ -1,34 +0,0 @@ -package reference - -import ( - distreference "github.com/docker/distribution/reference" -) - -// Parse parses the given references and returns the repository and -// tag (if present) from it. If there is an error during parsing, it will -// return an error. -func Parse(ref string) (string, string, error) { - distributionRef, err := distreference.ParseNamed(ref) - if err != nil { - return "", "", err - } - - tag := GetTagFromNamedRef(distributionRef) - return distributionRef.Name(), tag, nil -} - -// GetTagFromNamedRef returns a tag from the specified reference. 
-// This function is necessary as long as the docker "server" api makes the distinction between repository -// and tags. -func GetTagFromNamedRef(ref distreference.Named) string { - var tag string - switch x := ref.(type) { - case distreference.Digested: - tag = x.Digest().String() - case distreference.NamedTagged: - tag = x.Tag() - default: - tag = "latest" - } - return tag -} diff --git a/vendor/github.com/docker/docker/api/types/registry/authenticate.go b/vendor/github.com/docker/docker/api/types/registry/authenticate.go index 5e37d19..f0a2113 100644 --- a/vendor/github.com/docker/docker/api/types/registry/authenticate.go +++ b/vendor/github.com/docker/docker/api/types/registry/authenticate.go @@ -1,10 +1,10 @@ -package registry +package registry // import "github.com/docker/docker/api/types/registry" // ---------------------------------------------------------------------------- // DO NOT EDIT THIS FILE // This file was generated by `swagger generate operation` // -// See hack/swagger-gen.sh +// See hack/generate-swagger-api.sh // ---------------------------------------------------------------------------- // AuthenticateOKBody authenticate o k body diff --git a/vendor/github.com/docker/docker/api/types/registry/registry.go b/vendor/github.com/docker/docker/api/types/registry/registry.go index 28fafab..8789ad3 100644 --- a/vendor/github.com/docker/docker/api/types/registry/registry.go +++ b/vendor/github.com/docker/docker/api/types/registry/registry.go @@ -1,15 +1,19 @@ -package registry +package registry // import "github.com/docker/docker/api/types/registry" import ( "encoding/json" "net" + + "github.com/opencontainers/image-spec/specs-go/v1" ) // ServiceConfig stores daemon registry services configuration. type ServiceConfig struct { - InsecureRegistryCIDRs []*NetIPNet `json:"InsecureRegistryCIDRs"` - IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"` - Mirrors []string + AllowNondistributableArtifactsCIDRs []*NetIPNet + AllowNondistributableArtifactsHostnames []string + InsecureRegistryCIDRs []*NetIPNet `json:"InsecureRegistryCIDRs"` + IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"` + Mirrors []string } // NetIPNet is the net.IPNet type, which can be marshalled and @@ -102,3 +106,14 @@ type SearchResults struct { // Results is a slice containing the actual results for the search Results []SearchResult `json:"results"` } + +// DistributionInspect describes the result obtained from contacting the +// registry to retrieve image metadata +type DistributionInspect struct { + // Descriptor contains information about the manifest, including + // the content addressable digest + Descriptor v1.Descriptor + // Platforms contains the list of platforms supported by the image, + // obtained by parsing the manifest + Platforms []v1.Platform +} diff --git a/vendor/github.com/docker/docker/api/types/seccomp.go b/vendor/github.com/docker/docker/api/types/seccomp.go index 4f02ef3..67a41e1 100644 --- a/vendor/github.com/docker/docker/api/types/seccomp.go +++ b/vendor/github.com/docker/docker/api/types/seccomp.go @@ -1,4 +1,4 @@ -package types +package types // import "github.com/docker/docker/api/types" // Seccomp represents the config for a seccomp profile for syscall restriction. 
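// A profile typically pairs a default action with a list of per-syscall
// rules (see the Syscalls field below).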
type Seccomp struct { @@ -10,7 +10,7 @@ type Seccomp struct { Syscalls []*Syscall `json:"syscalls"` } -// Architecture is used to represent an specific architecture +// Architecture is used to represent a specific architecture // and its sub-architectures type Architecture struct { Arch Arch `json:"architecture"` diff --git a/vendor/github.com/docker/docker/api/types/stats.go b/vendor/github.com/docker/docker/api/types/stats.go index 9bf1928..60175c0 100644 --- a/vendor/github.com/docker/docker/api/types/stats.go +++ b/vendor/github.com/docker/docker/api/types/stats.go @@ -1,6 +1,6 @@ // Package types is used for API stability in the types and response to the // consumers of the API stats endpoint. -package types +package types // import "github.com/docker/docker/api/types" import "time" @@ -47,6 +47,9 @@ type CPUStats struct { // System Usage. Linux only. SystemUsage uint64 `json:"system_cpu_usage,omitempty"` + // Online CPUs. Linux only. + OnlineCPUs uint32 `json:"online_cpus,omitempty"` + // Throttling Data. Linux only. ThrottlingData ThrottlingData `json:"throttling_data,omitempty"` } diff --git a/vendor/github.com/docker/docker/api/types/strslice/strslice.go b/vendor/github.com/docker/docker/api/types/strslice/strslice.go index bad493f..82921ce 100644 --- a/vendor/github.com/docker/docker/api/types/strslice/strslice.go +++ b/vendor/github.com/docker/docker/api/types/strslice/strslice.go @@ -1,4 +1,4 @@ -package strslice +package strslice // import "github.com/docker/docker/api/types/strslice" import "encoding/json" diff --git a/vendor/github.com/docker/docker/api/types/swarm/common.go b/vendor/github.com/docker/docker/api/types/swarm/common.go index 64a648b..ef020f4 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/common.go +++ b/vendor/github.com/docker/docker/api/types/swarm/common.go @@ -1,4 +1,4 @@ -package swarm +package swarm // import "github.com/docker/docker/api/types/swarm" import "time" @@ -17,11 +17,24 @@ type Meta struct { // Annotations represents how to describe an object. type Annotations struct { Name string `json:",omitempty"` - Labels map[string]string `json:",omitempty"` + Labels map[string]string `json:"Labels"` } -// Driver represents a driver (network, logging). +// Driver represents a driver (network, logging, secrets backend). type Driver struct { Name string `json:",omitempty"` Options map[string]string `json:",omitempty"` } + +// TLSInfo represents the TLS information about what CA certificate is trusted, +// and who the issuer for a TLS certificate is +type TLSInfo struct { + // TrustRoot is the trusted CA root certificate in PEM format + TrustRoot string `json:",omitempty"` + + // CertIssuer is the raw subject bytes of the issuer + CertIssuerSubject []byte `json:",omitempty"` + + // CertIssuerPublicKey is the raw public key bytes of the issuer + CertIssuerPublicKey []byte `json:",omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/config.go b/vendor/github.com/docker/docker/api/types/swarm/config.go new file mode 100644 index 0000000..a1555cf --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/config.go @@ -0,0 +1,35 @@ +package swarm // import "github.com/docker/docker/api/types/swarm" + +import "os" + +// Config represents a config. 
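Note: the OnlineCPUs field added to CPUStats above lets a stats consumer compute a CPU percentage without inferring the CPU count from len(PercpuUsage). A hedged sketch of the usual delta calculation; the pre/cur pair would come from two consecutive stats samples, and the fallback branch is an assumption about older daemons that do not populate the new field:

package stats

import "github.com/docker/docker/api/types"

// cpuPercent computes the container CPU percentage between two samples.
func cpuPercent(pre, cur types.CPUStats) float64 {
	cpuDelta := float64(cur.CPUUsage.TotalUsage) - float64(pre.CPUUsage.TotalUsage)
	sysDelta := float64(cur.SystemUsage) - float64(pre.SystemUsage)

	online := float64(cur.OnlineCPUs)
	if online == 0 {
		// Older daemons do not report OnlineCPUs; fall back to counting
		// the per-CPU usage entries.
		online = float64(len(cur.CPUUsage.PercpuUsage))
	}
	if sysDelta > 0 && cpuDelta > 0 {
		return cpuDelta / sysDelta * online * 100.0
	}
	return 0.0
}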
+type Config struct { + ID string + Meta + Spec ConfigSpec +} + +// ConfigSpec represents a config specification from a config in swarm +type ConfigSpec struct { + Annotations + Data []byte `json:",omitempty"` + + // Templating controls whether and how to evaluate the config payload as + // a template. If it is not set, no templating is used. + Templating *Driver `json:",omitempty"` +} + +// ConfigReferenceFileTarget is a file target in a config reference +type ConfigReferenceFileTarget struct { + Name string + UID string + GID string + Mode os.FileMode +} + +// ConfigReference is a reference to a config in swarm +type ConfigReference struct { + File *ConfigReferenceFileTarget + ConfigID string + ConfigName string +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/container.go b/vendor/github.com/docker/docker/api/types/swarm/container.go index 4ab476c..0041653 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/container.go +++ b/vendor/github.com/docker/docker/api/types/swarm/container.go @@ -1,4 +1,4 @@ -package swarm +package swarm // import "github.com/docker/docker/api/types/swarm" import ( "time" @@ -21,6 +21,28 @@ type DNSConfig struct { Options []string `json:",omitempty"` } +// SELinuxContext contains the SELinux labels of the container. +type SELinuxContext struct { + Disable bool + + User string + Role string + Type string + Level string +} + +// CredentialSpec for managed service account (Windows only) +type CredentialSpec struct { + File string + Registry string +} + +// Privileges defines the security options for the container. +type Privileges struct { + CredentialSpec *CredentialSpec + SELinuxContext *SELinuxContext +} + // ContainerSpec represents the spec of a container. type ContainerSpec struct { Image string `json:",omitempty"` @@ -32,15 +54,20 @@ type ContainerSpec struct { Dir string `json:",omitempty"` User string `json:",omitempty"` Groups []string `json:",omitempty"` + Privileges *Privileges `json:",omitempty"` + StopSignal string `json:",omitempty"` TTY bool `json:",omitempty"` OpenStdin bool `json:",omitempty"` + ReadOnly bool `json:",omitempty"` Mounts []mount.Mount `json:",omitempty"` StopGracePeriod *time.Duration `json:",omitempty"` Healthcheck *container.HealthConfig `json:",omitempty"` // The format of extra hosts on swarmkit is specified in: // http://man7.org/linux/man-pages/man5/hosts.5.html // IP_address canonical_hostname [aliases...] - Hosts []string `json:",omitempty"` - DNSConfig *DNSConfig `json:",omitempty"` - Secrets []*SecretReference `json:",omitempty"` + Hosts []string `json:",omitempty"` + DNSConfig *DNSConfig `json:",omitempty"` + Secrets []*SecretReference `json:",omitempty"` + Configs []*ConfigReference `json:",omitempty"` + Isolation container.Isolation `json:",omitempty"` } diff --git a/vendor/github.com/docker/docker/api/types/swarm/network.go b/vendor/github.com/docker/docker/api/types/swarm/network.go index 5a5e11b..98ef328 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/network.go +++ b/vendor/github.com/docker/docker/api/types/swarm/network.go @@ -1,4 +1,8 @@ -package swarm +package swarm // import "github.com/docker/docker/api/types/swarm" + +import ( + "github.com/docker/docker/api/types/network" +) // Endpoint represents an endpoint. 
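Note: the new swarm config types above mirror the existing secret plumbing: a ConfigReference attaches a named config to a service's containers at a file target, via the Configs field added to ContainerSpec in the same hunk. A small sketch; the config ID, name, and target path are placeholders (the real ID would come from the config-create API):

package main

import "github.com/docker/docker/api/types/swarm"

func exampleContainerSpec() swarm.ContainerSpec {
	cfg := &swarm.ConfigReference{
		ConfigID:   "example-config-id", // placeholder
		ConfigName: "nginx-conf",
		File: &swarm.ConfigReferenceFileTarget{
			Name: "/etc/nginx/nginx.conf",
			UID:  "0",
			GID:  "0",
			Mode: 0444,
		},
	}
	return swarm.ContainerSpec{
		Image:   "nginx:alpine",
		Configs: []*swarm.ConfigReference{cfg},
	}
}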
type Endpoint struct { @@ -58,6 +62,8 @@ const ( PortConfigProtocolTCP PortConfigProtocol = "tcp" // PortConfigProtocolUDP UDP PortConfigProtocolUDP PortConfigProtocol = "udp" + // PortConfigProtocolSCTP SCTP + PortConfigProtocolSCTP PortConfigProtocol = "sctp" ) // EndpointVirtualIP represents the virtual ip of a port. @@ -78,17 +84,21 @@ type Network struct { // NetworkSpec represents the spec of a network. type NetworkSpec struct { Annotations - DriverConfiguration *Driver `json:",omitempty"` - IPv6Enabled bool `json:",omitempty"` - Internal bool `json:",omitempty"` - Attachable bool `json:",omitempty"` - IPAMOptions *IPAMOptions `json:",omitempty"` + DriverConfiguration *Driver `json:",omitempty"` + IPv6Enabled bool `json:",omitempty"` + Internal bool `json:",omitempty"` + Attachable bool `json:",omitempty"` + Ingress bool `json:",omitempty"` + IPAMOptions *IPAMOptions `json:",omitempty"` + ConfigFrom *network.ConfigReference `json:",omitempty"` + Scope string `json:",omitempty"` } // NetworkAttachmentConfig represents the configuration of a network attachment. type NetworkAttachmentConfig struct { - Target string `json:",omitempty"` - Aliases []string `json:",omitempty"` + Target string `json:",omitempty"` + Aliases []string `json:",omitempty"` + DriverOpts map[string]string `json:",omitempty"` } // NetworkAttachment represents a network attachment. diff --git a/vendor/github.com/docker/docker/api/types/swarm/node.go b/vendor/github.com/docker/docker/api/types/swarm/node.go index 379e17a..1e30f5f 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/node.go +++ b/vendor/github.com/docker/docker/api/types/swarm/node.go @@ -1,4 +1,4 @@ -package swarm +package swarm // import "github.com/docker/docker/api/types/swarm" // Node represents a node. type Node struct { @@ -52,6 +52,7 @@ type NodeDescription struct { Platform Platform `json:",omitempty"` Resources Resources `json:",omitempty"` Engine EngineDescription `json:",omitempty"` + TLSInfo TLSInfo `json:",omitempty"` } // Platform represents the platform (Arch/OS). diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime.go b/vendor/github.com/docker/docker/api/types/swarm/runtime.go new file mode 100644 index 0000000..81b5f4c --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/runtime.go @@ -0,0 +1,19 @@ +package swarm // import "github.com/docker/docker/api/types/swarm" + +// RuntimeType is the type of runtime used for the TaskSpec +type RuntimeType string + +// RuntimeURL is the proto type url +type RuntimeURL string + +const ( + // RuntimeContainer is the container based runtime + RuntimeContainer RuntimeType = "container" + // RuntimePlugin is the plugin based runtime + RuntimePlugin RuntimeType = "plugin" + + // RuntimeURLContainer is the proto url for the container type + RuntimeURLContainer RuntimeURL = "types.docker.com/RuntimeContainer" + // RuntimeURLPlugin is the proto url for the plugin type + RuntimeURLPlugin RuntimeURL = "types.docker.com/RuntimePlugin" +) diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go b/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go new file mode 100644 index 0000000..98c2806 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go @@ -0,0 +1,3 @@ +//go:generate protoc -I . --gogofast_out=import_path=github.com/docker/docker/api/types/swarm/runtime:. 
plugin.proto + +package runtime // import "github.com/docker/docker/api/types/swarm/runtime" diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go new file mode 100644 index 0000000..1fdc9b0 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go @@ -0,0 +1,712 @@ +// Code generated by protoc-gen-gogo. +// source: plugin.proto +// DO NOT EDIT! + +/* + Package runtime is a generated protocol buffer package. + + It is generated from these files: + plugin.proto + + It has these top-level messages: + PluginSpec + PluginPrivilege +*/ +package runtime + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +// PluginSpec defines the base payload which clients can specify for creating +// a service with the plugin runtime. +type PluginSpec struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Remote string `protobuf:"bytes,2,opt,name=remote,proto3" json:"remote,omitempty"` + Privileges []*PluginPrivilege `protobuf:"bytes,3,rep,name=privileges" json:"privileges,omitempty"` + Disabled bool `protobuf:"varint,4,opt,name=disabled,proto3" json:"disabled,omitempty"` +} + +func (m *PluginSpec) Reset() { *m = PluginSpec{} } +func (m *PluginSpec) String() string { return proto.CompactTextString(m) } +func (*PluginSpec) ProtoMessage() {} +func (*PluginSpec) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{0} } + +func (m *PluginSpec) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *PluginSpec) GetRemote() string { + if m != nil { + return m.Remote + } + return "" +} + +func (m *PluginSpec) GetPrivileges() []*PluginPrivilege { + if m != nil { + return m.Privileges + } + return nil +} + +func (m *PluginSpec) GetDisabled() bool { + if m != nil { + return m.Disabled + } + return false +} + +// PluginPrivilege describes a permission the user has to accept +// upon installing a plugin. 
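Note: the surrounding plugin.pb.go is generated protobuf code; callers only need the Marshal/Unmarshal pair it exposes. A round-trip sketch using just those methods, with made-up plugin names:

package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/api/types/swarm/runtime"
)

func main() {
	spec := runtime.PluginSpec{
		Name:   "example/log-driver", // hypothetical plugin reference
		Remote: "registry.example.com/log-driver:1.0",
	}
	data, err := spec.Marshal()
	if err != nil {
		log.Fatal(err)
	}
	var out runtime.PluginSpec
	if err := out.Unmarshal(data); err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.GetName(), out.GetRemote())
}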
+type PluginPrivilege struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + Value []string `protobuf:"bytes,3,rep,name=value" json:"value,omitempty"` +} + +func (m *PluginPrivilege) Reset() { *m = PluginPrivilege{} } +func (m *PluginPrivilege) String() string { return proto.CompactTextString(m) } +func (*PluginPrivilege) ProtoMessage() {} +func (*PluginPrivilege) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{1} } + +func (m *PluginPrivilege) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *PluginPrivilege) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *PluginPrivilege) GetValue() []string { + if m != nil { + return m.Value + } + return nil +} + +func init() { + proto.RegisterType((*PluginSpec)(nil), "PluginSpec") + proto.RegisterType((*PluginPrivilege)(nil), "PluginPrivilege") +} +func (m *PluginSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PluginSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Remote) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Remote))) + i += copy(dAtA[i:], m.Remote) + } + if len(m.Privileges) > 0 { + for _, msg := range m.Privileges { + dAtA[i] = 0x1a + i++ + i = encodeVarintPlugin(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Disabled { + dAtA[i] = 0x20 + i++ + if m.Disabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *PluginPrivilege) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PluginPrivilege) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Description) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Description))) + i += copy(dAtA[i:], m.Description) + } + if len(m.Value) > 0 { + for _, s := range m.Value { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func encodeFixed64Plugin(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Plugin(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintPlugin(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + 
dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *PluginSpec) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovPlugin(uint64(l)) + } + l = len(m.Remote) + if l > 0 { + n += 1 + l + sovPlugin(uint64(l)) + } + if len(m.Privileges) > 0 { + for _, e := range m.Privileges { + l = e.Size() + n += 1 + l + sovPlugin(uint64(l)) + } + } + if m.Disabled { + n += 2 + } + return n +} + +func (m *PluginPrivilege) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovPlugin(uint64(l)) + } + l = len(m.Description) + if l > 0 { + n += 1 + l + sovPlugin(uint64(l)) + } + if len(m.Value) > 0 { + for _, s := range m.Value { + l = len(s) + n += 1 + l + sovPlugin(uint64(l)) + } + } + return n +} + +func sovPlugin(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozPlugin(x uint64) (n int) { + return sovPlugin(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *PluginSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PluginSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PluginSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Remote", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Remote = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Privileges", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Privileges = append(m.Privileges, &PluginPrivilege{}) + if err := m.Privileges[len(m.Privileges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil 
{ + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Disabled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Disabled = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipPlugin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPlugin + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PluginPrivilege) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PluginPrivilege: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PluginPrivilege: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Description = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPlugin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPlugin + } + if (iNdEx + skippy) > l { + return 
io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipPlugin(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlugin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlugin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlugin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthPlugin + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlugin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipPlugin(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthPlugin = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowPlugin = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("plugin.proto", fileDescriptorPlugin) } + +var fileDescriptorPlugin = []byte{ + // 196 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0xc8, 0x29, 0x4d, + 0xcf, 0xcc, 0xd3, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x57, 0x6a, 0x63, 0xe4, 0xe2, 0x0a, 0x00, 0x0b, + 0x04, 0x17, 0xa4, 0x26, 0x0b, 0x09, 0x71, 0xb1, 0xe4, 0x25, 0xe6, 0xa6, 0x4a, 0x30, 0x2a, 0x30, + 0x6a, 0x70, 0x06, 0x81, 0xd9, 0x42, 0x62, 0x5c, 0x6c, 0x45, 0xa9, 0xb9, 0xf9, 0x25, 0xa9, 0x12, + 0x4c, 0x60, 0x51, 0x28, 0x4f, 0xc8, 0x80, 0x8b, 0xab, 0xa0, 0x28, 0xb3, 0x2c, 0x33, 0x27, 0x35, + 0x3d, 0xb5, 0x58, 0x82, 0x59, 0x81, 0x59, 0x83, 0xdb, 0x48, 0x40, 0x0f, 0x62, 0x58, 0x00, 0x4c, + 0x22, 0x08, 0x49, 0x8d, 0x90, 0x14, 0x17, 0x47, 0x4a, 0x66, 0x71, 0x62, 0x52, 0x4e, 0x6a, 0x8a, + 0x04, 0x8b, 0x02, 0xa3, 0x06, 0x47, 0x10, 0x9c, 0xaf, 0x14, 0xcb, 0xc5, 0x8f, 0xa6, 0x15, 0xab, + 0x63, 0x14, 0xb8, 0xb8, 0x53, 0x52, 0x8b, 0x93, 0x8b, 0x32, 0x0b, 0x4a, 0x32, 0xf3, 0xf3, 0xa0, + 0x2e, 0x42, 0x16, 0x12, 0x12, 0xe1, 0x62, 0x2d, 0x4b, 0xcc, 0x29, 0x4d, 0x05, 0xbb, 0x88, 0x33, + 0x08, 0xc2, 0x71, 0xe2, 0x39, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, + 0x18, 0x93, 0xd8, 0xc0, 0x9e, 0x37, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xb8, 0x84, 0xad, 0x79, + 0x0c, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto 
b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto new file mode 100644 index 0000000..6d63b77 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto @@ -0,0 +1,20 @@ +syntax = "proto3"; + +option go_package = "github.com/docker/docker/api/types/swarm/runtime;runtime"; + +// PluginSpec defines the base payload which clients can specify for creating +// a service with the plugin runtime. +message PluginSpec { + string name = 1; + string remote = 2; + repeated PluginPrivilege privileges = 3; + bool disabled = 4; +} + +// PluginPrivilege describes a permission the user has to accept +// upon installing a plugin. +message PluginPrivilege { + string name = 1; + string description = 2; + repeated string value = 3; +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/secret.go b/vendor/github.com/docker/docker/api/types/swarm/secret.go index fdb2388..d5213ec 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/secret.go +++ b/vendor/github.com/docker/docker/api/types/swarm/secret.go @@ -1,4 +1,4 @@ -package swarm +package swarm // import "github.com/docker/docker/api/types/swarm" import "os" @@ -12,7 +12,12 @@ type Secret struct { // SecretSpec represents a secret specification from a secret in swarm type SecretSpec struct { Annotations - Data []byte `json:",omitempty"` + Data []byte `json:",omitempty"` + Driver *Driver `json:",omitempty"` // name of the secrets driver used to fetch the secret's value from an external secret store + + // Templating controls whether and how to evaluate the secret payload as + // a template. If it is not set, no templating is used. + Templating *Driver `json:",omitempty"` } // SecretReferenceFileTarget is a file target in a secret reference diff --git a/vendor/github.com/docker/docker/api/types/swarm/service.go b/vendor/github.com/docker/docker/api/types/swarm/service.go index 2cf2642..abf192e 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/service.go +++ b/vendor/github.com/docker/docker/api/types/swarm/service.go @@ -1,4 +1,4 @@ -package swarm +package swarm // import "github.com/docker/docker/api/types/swarm" import "time" @@ -6,10 +6,10 @@ import "time" type Service struct { ID string Meta - Spec ServiceSpec `json:",omitempty"` - PreviousSpec *ServiceSpec `json:",omitempty"` - Endpoint Endpoint `json:",omitempty"` - UpdateStatus UpdateStatus `json:",omitempty"` + Spec ServiceSpec `json:",omitempty"` + PreviousSpec *ServiceSpec `json:",omitempty"` + Endpoint Endpoint `json:",omitempty"` + UpdateStatus *UpdateStatus `json:",omitempty"` } // ServiceSpec represents the spec of a service. @@ -18,9 +18,10 @@ type ServiceSpec struct { // TaskTemplate defines how the service should construct new tasks when // orchestrating this service. - TaskTemplate TaskSpec `json:",omitempty"` - Mode ServiceMode `json:",omitempty"` - UpdateConfig *UpdateConfig `json:",omitempty"` + TaskTemplate TaskSpec `json:",omitempty"` + Mode ServiceMode `json:",omitempty"` + UpdateConfig *UpdateConfig `json:",omitempty"` + RollbackConfig *UpdateConfig `json:",omitempty"` // Networks field in ServiceSpec is deprecated. The // same field in TaskSpec should be used instead. @@ -45,13 +46,19 @@ const ( UpdateStatePaused UpdateState = "paused" // UpdateStateCompleted is the completed state. UpdateStateCompleted UpdateState = "completed" + // UpdateStateRollbackStarted is the state with a rollback in progress. 
+ UpdateStateRollbackStarted UpdateState = "rollback_started" + // UpdateStateRollbackPaused is the state with a rollback paused. + UpdateStateRollbackPaused UpdateState = "rollback_paused" + // UpdateStateRollbackCompleted is the state with a rollback completed. + UpdateStateRollbackCompleted UpdateState = "rollback_completed" ) // UpdateStatus reports the status of a service update. type UpdateStatus struct { State UpdateState `json:",omitempty"` - StartedAt time.Time `json:",omitempty"` - CompletedAt time.Time `json:",omitempty"` + StartedAt *time.Time `json:",omitempty"` + CompletedAt *time.Time `json:",omitempty"` Message string `json:",omitempty"` } @@ -68,6 +75,13 @@ const ( UpdateFailureActionPause = "pause" // UpdateFailureActionContinue CONTINUE UpdateFailureActionContinue = "continue" + // UpdateFailureActionRollback ROLLBACK + UpdateFailureActionRollback = "rollback" + + // UpdateOrderStopFirst STOP_FIRST + UpdateOrderStopFirst = "stop-first" + // UpdateOrderStartFirst START_FIRST + UpdateOrderStartFirst = "start-first" ) // UpdateConfig represents the update configuration. @@ -102,4 +116,9 @@ type UpdateConfig struct { // If the failure action is PAUSE, no more tasks will be updated until // another update is started. MaxFailureRatio float32 + + // Order indicates the order of operations when rolling out an updated + // task. Either the old task is shut down before the new task is + // started, or the new task is started before the old task is shut down. + Order string } diff --git a/vendor/github.com/docker/docker/api/types/swarm/swarm.go b/vendor/github.com/docker/docker/api/types/swarm/swarm.go index 0b42219..1b111d7 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/swarm.go +++ b/vendor/github.com/docker/docker/api/types/swarm/swarm.go @@ -1,13 +1,15 @@ -package swarm +package swarm // import "github.com/docker/docker/api/types/swarm" import "time" -// ClusterInfo represents info about the cluster for outputing in "info" +// ClusterInfo represents info about the cluster for outputting in "info" // it contains the same information as "Swarm", but without the JoinTokens type ClusterInfo struct { ID string Meta - Spec Spec + Spec Spec + TLSInfo TLSInfo + RootRotationInProgress bool } // Swarm represents a swarm. @@ -107,6 +109,16 @@ type CAConfig struct { // ExternalCAs is a list of CAs to which a manager node will make // certificate signing requests for node certificates. ExternalCAs []*ExternalCA `json:",omitempty"` + + // SigningCACert and SigningCAKey specify the desired signing root CA and + // root CA key for the swarm. When inspecting the cluster, the key will + // be redacted. + SigningCACert string `json:",omitempty"` + SigningCAKey string `json:",omitempty"` + + // If this value changes, and there is no specified signing cert and key, + // then the swarm is forced to generate a new root certificate and key. + ForceRotate uint64 `json:",omitempty"` } // ExternalCAProtocol represents type of external CA. @@ -126,23 +138,31 @@ type ExternalCA struct { // Options is a set of additional key/value pairs whose interpretation // depends on the specified CA type. Options map[string]string `json:",omitempty"` + + // CACert specifies which root CA is used by this external CA. This certificate must + // be in PEM format. + CACert string } // InitRequest is the request used to init a swarm.
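Note: the service-update additions above (rollback states, UpdateFailureActionRollback, and the stop-first/start-first Order field) are consumed through ServiceSpec's update configuration. A hedged sketch of a spec that rolls back on failure and starts the replacement task before stopping the old one; Parallelism and Delay are existing fields of the same struct, not shown in the hunk:

package main

import (
	"time"

	"github.com/docker/docker/api/types/swarm"
)

func exampleUpdateConfig() *swarm.UpdateConfig {
	return &swarm.UpdateConfig{
		Parallelism:     2,
		Delay:           10 * time.Second,
		FailureAction:   swarm.UpdateFailureActionRollback,
		MaxFailureRatio: 0.2,
		Order:           swarm.UpdateOrderStartFirst, // new task up before the old one stops
	}
}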
type InitRequest struct { ListenAddr string AdvertiseAddr string + DataPathAddr string ForceNewCluster bool Spec Spec AutoLockManagers bool + Availability NodeAvailability } // JoinRequest is the request used to join a swarm. type JoinRequest struct { ListenAddr string AdvertiseAddr string + DataPathAddr string RemoteAddrs []string JoinToken string // accept by secret + Availability NodeAvailability } // UnlockRequest is the request used to unlock a swarm. @@ -177,10 +197,10 @@ type Info struct { Error string RemoteManagers []Peer - Nodes int - Managers int + Nodes int `json:",omitempty"` + Managers int `json:",omitempty"` - Cluster ClusterInfo + Cluster *ClusterInfo `json:",omitempty"` } // Peer represents a peer. diff --git a/vendor/github.com/docker/docker/api/types/swarm/task.go b/vendor/github.com/docker/docker/api/types/swarm/task.go index ace12cc..5c2bc49 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/task.go +++ b/vendor/github.com/docker/docker/api/types/swarm/task.go @@ -1,6 +1,10 @@ -package swarm +package swarm // import "github.com/docker/docker/api/types/swarm" -import "time" +import ( + "time" + + "github.com/docker/docker/api/types/swarm/runtime" +) // TaskState represents the state of a task. type TaskState string @@ -32,6 +36,10 @@ const ( TaskStateFailed TaskState = "failed" // TaskStateRejected REJECTED TaskStateRejected TaskState = "rejected" + // TaskStateRemove REMOVE + TaskStateRemove TaskState = "remove" + // TaskStateOrphaned ORPHANED + TaskStateOrphaned TaskState = "orphaned" ) // Task represents a task. @@ -47,11 +55,16 @@ type Task struct { Status TaskStatus `json:",omitempty"` DesiredState TaskState `json:",omitempty"` NetworksAttachments []NetworkAttachment `json:",omitempty"` + GenericResources []GenericResource `json:",omitempty"` } // TaskSpec represents the spec of a task. type TaskSpec struct { - ContainerSpec ContainerSpec `json:",omitempty"` + // ContainerSpec and PluginSpec are mutually exclusive. + // PluginSpec will only be used when the `Runtime` field is set to `plugin` + ContainerSpec *ContainerSpec `json:",omitempty"` + PluginSpec *runtime.PluginSpec `json:",omitempty"` + Resources *ResourceRequirements `json:",omitempty"` RestartPolicy *RestartPolicy `json:",omitempty"` Placement *Placement `json:",omitempty"` @@ -65,12 +78,40 @@ type TaskSpec struct { // ForceUpdate is a counter that triggers an update even if no relevant // parameters have been changed. ForceUpdate uint64 + + Runtime RuntimeType `json:",omitempty"` } // Resources represents resources (CPU/Memory). type Resources struct { - NanoCPUs int64 `json:",omitempty"` - MemoryBytes int64 `json:",omitempty"` + NanoCPUs int64 `json:",omitempty"` + MemoryBytes int64 `json:",omitempty"` + GenericResources []GenericResource `json:",omitempty"` +} + +// GenericResource represents a "user defined" resource which can +// be either an integer (e.g: SSD=3) or a string (e.g: SSD=sda1) +type GenericResource struct { + NamedResourceSpec *NamedGenericResource `json:",omitempty"` + DiscreteResourceSpec *DiscreteGenericResource `json:",omitempty"` +} + +// NamedGenericResource represents a "user defined" resource which is defined +// as a string. +// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...) +// Value is used to identify the resource (GPU="UUID-1", FPGA="/dev/sdb5", ...) 
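Note: TaskSpec above now carries a pointer ContainerSpec and a mutually exclusive PluginSpec, selected by the new Runtime field. A sketch of a plugin-runtime task; the plugin name and remote are placeholders:

package main

import (
	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/api/types/swarm/runtime"
)

func examplePluginTask() swarm.TaskSpec {
	return swarm.TaskSpec{
		Runtime: swarm.RuntimePlugin, // selects PluginSpec instead of ContainerSpec
		PluginSpec: &runtime.PluginSpec{
			Name:   "example/metrics-plugin", // hypothetical
			Remote: "registry.example.com/metrics-plugin:latest",
		},
		// ContainerSpec stays nil: the two specs are mutually exclusive.
	}
}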
+type NamedGenericResource struct { + Kind string `json:",omitempty"` + Value string `json:",omitempty"` +} + +// DiscreteGenericResource represents a "user defined" resource which is defined +// as an integer +// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...) +// Value is used to count the resource (SSD=5, HDD=3, ...) +type DiscreteGenericResource struct { + Kind string `json:",omitempty"` + Value int64 `json:",omitempty"` } // ResourceRequirements represents resources requirements. @@ -81,7 +122,26 @@ type ResourceRequirements struct { // Placement represents orchestration parameters. type Placement struct { - Constraints []string `json:",omitempty"` + Constraints []string `json:",omitempty"` + Preferences []PlacementPreference `json:",omitempty"` + + // Platforms stores all the platforms that the image can run on. + // This field is used in the platform filter for scheduling. If empty, + // then the platform filter is off, meaning there are no scheduling restrictions. + Platforms []Platform `json:",omitempty"` +} + +// PlacementPreference provides a way to make the scheduler aware of factors +// such as topology. +type PlacementPreference struct { + Spread *SpreadOver +} + +// SpreadOver is a scheduling preference that instructs the scheduler to spread +// tasks evenly over groups of nodes identified by labels. +type SpreadOver struct { + // label descriptor, such as engine.labels.az + SpreadDescriptor string } // RestartPolicy represents the restart policy. @@ -106,19 +166,19 @@ const ( // TaskStatus represents the status of a task. type TaskStatus struct { - Timestamp time.Time `json:",omitempty"` - State TaskState `json:",omitempty"` - Message string `json:",omitempty"` - Err string `json:",omitempty"` - ContainerStatus ContainerStatus `json:",omitempty"` - PortStatus PortStatus `json:",omitempty"` + Timestamp time.Time `json:",omitempty"` + State TaskState `json:",omitempty"` + Message string `json:",omitempty"` + Err string `json:",omitempty"` + ContainerStatus *ContainerStatus `json:",omitempty"` + PortStatus PortStatus `json:",omitempty"` } // ContainerStatus represents the status of a container. 
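Note: GenericResource above is a tagged union, so exactly one of NamedResourceSpec or DiscreteResourceSpec should be set per entry. A sketch of task resources requesting two GPUs by count and one SSD by name; the kinds and values are illustrative:

package main

import "github.com/docker/docker/api/types/swarm"

func exampleResources() swarm.Resources {
	return swarm.Resources{
		NanoCPUs:    2000000000, // 2 CPUs, expressed in billionths of a CPU
		MemoryBytes: 512 * 1024 * 1024,
		GenericResources: []swarm.GenericResource{
			// Discrete: the scheduler counts these (e.g. "give me 2 GPUs").
			{DiscreteResourceSpec: &swarm.DiscreteGenericResource{Kind: "GPU", Value: 2}},
			// Named: the scheduler matches a specific named unit.
			{NamedResourceSpec: &swarm.NamedGenericResource{Kind: "SSD", Value: "sda1"}},
		},
	}
}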
type ContainerStatus struct { - ContainerID string `json:",omitempty"` - PID int `json:",omitempty"` - ExitCode int `json:",omitempty"` + ContainerID string + PID int + ExitCode int } // PortStatus represents the port status of a task's host ports whose diff --git a/vendor/github.com/docker/docker/api/types/time/duration_convert.go b/vendor/github.com/docker/docker/api/types/time/duration_convert.go index 63e1eec..84b6f07 100644 --- a/vendor/github.com/docker/docker/api/types/time/duration_convert.go +++ b/vendor/github.com/docker/docker/api/types/time/duration_convert.go @@ -1,4 +1,4 @@ -package time +package time // import "github.com/docker/docker/api/types/time" import ( "strconv" diff --git a/vendor/github.com/docker/docker/api/types/time/timestamp.go b/vendor/github.com/docker/docker/api/types/time/timestamp.go index d3695ba..ea3495e 100644 --- a/vendor/github.com/docker/docker/api/types/time/timestamp.go +++ b/vendor/github.com/docker/docker/api/types/time/timestamp.go @@ -1,4 +1,4 @@ -package time +package time // import "github.com/docker/docker/api/types/time" import ( "fmt" @@ -29,10 +29,8 @@ func GetTimestamp(value string, reference time.Time) (string, error) { } var format string - var parseInLocation bool - // if the string has a Z or a + or three dashes use parse otherwise use parseinlocation - parseInLocation = !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3) + parseInLocation := !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3) if strings.Contains(value, ".") { if parseInLocation { @@ -84,11 +82,14 @@ func GetTimestamp(value string, reference time.Time) (string, error) { } if err != nil { - // if there is a `-` then its an RFC3339 like timestamp otherwise assume unixtimestamp + // if there is a `-` then it's an RFC3339 like timestamp if strings.Contains(value, "-") { return "", err // was probably an RFC3339 like timestamp but the parser failed with an error } - return value, nil // unixtimestamp in and out case (meaning: the value passed at the command line is already in the right format for passing to the server) + if _, _, err := parseTimestamp(value); err != nil { + return "", fmt.Errorf("failed to parse value as time or duration: %q", value) + } + return value, nil // unix timestamp in and out case (meaning: the value passed at the command line is already in the right format for passing to the server) } return fmt.Sprintf("%d.%09d", t.Unix(), int64(t.Nanosecond())), nil @@ -106,6 +107,10 @@ func ParseTimestamps(value string, def int64) (int64, int64, error) { if value == "" { return def, 0, nil } + return parseTimestamp(value) +} + +func parseTimestamp(value string) (int64, int64, error) { sa := strings.SplitN(value, ".", 2) s, err := strconv.ParseInt(sa[0], 10, 64) if err != nil { @@ -118,7 +123,7 @@ func ParseTimestamps(value string, def int64) (int64, int64, error) { if err != nil { return s, n, err } - // should already be in nanoseconds but just in case convert n to nanoseonds + // should already be in nanoseconds but just in case convert n to nanoseconds n = int64(float64(n) * math.Pow(float64(10), float64(9-len(sa[1])))) return s, n, nil } diff --git a/vendor/github.com/docker/docker/api/types/types.go b/vendor/github.com/docker/docker/api/types/types.go index a82c3e8..729f4eb 100644 --- a/vendor/github.com/docker/docker/api/types/types.go +++ b/vendor/github.com/docker/docker/api/types/types.go @@ -1,4 +1,4 @@ -package types +package types // import "github.com/docker/docker/api/types" import ( "errors" @@ -17,38 
+17,6 @@ import ( "github.com/docker/go-connections/nat" ) -// ContainerChange contains response of Engine API: -// GET "/containers/{name:.*}/changes" -type ContainerChange struct { - Kind int - Path string -} - -// ImageHistory contains response of Engine API: -// GET "/images/{name:.*}/history" -type ImageHistory struct { - ID string `json:"Id"` - Created int64 - CreatedBy string - Tags []string - Size int64 - Comment string -} - -// ImageDelete contains response of Engine API: -// DELETE "/images/{name:.*}" -type ImageDelete struct { - Untagged string `json:",omitempty"` - Deleted string `json:",omitempty"` -} - -// GraphDriverData returns Image's graph driver config info -// when calling inspect command -type GraphDriverData struct { - Name string - Data map[string]string -} - // RootFS returns Image's RootFS description including the layer IDs. type RootFS struct { Type string @@ -77,6 +45,12 @@ type ImageInspect struct { VirtualSize int64 GraphDriver GraphDriverData RootFS RootFS + Metadata ImageMetadata +} + +// ImageMetadata contains engine-local data about the image +type ImageMetadata struct { + LastTagTime time.Time `json:",omitempty"` } // Container contains response of Engine API: @@ -125,23 +99,29 @@ type ContainerStats struct { OSType string `json:"ostype"` } -// ContainerProcessList contains response of Engine API: -// GET "/containers/{name:.*}/top" -type ContainerProcessList struct { - Processes [][]string - Titles []string -} - // Ping contains response of Engine API: // GET "/_ping" type Ping struct { APIVersion string + OSType string Experimental bool } +// ComponentVersion describes the version information for a specific component. +type ComponentVersion struct { + Name string + Version string + Details map[string]string `json:",omitempty"` +} + // Version contains response of Engine API: // GET "/version" type Version struct { + Platform struct{ Name string } `json:",omitempty"` + Components []ComponentVersion `json:",omitempty"` + + // The following fields are deprecated, they relate to the Engine component and are kept for backwards compatibility + Version string APIVersion string `json:"ApiVersion"` MinAPIVersion string `json:"MinAPIVersion,omitempty"` @@ -154,11 +134,11 @@ type Version struct { BuildTime string `json:",omitempty"` } -// Commit records a external tool actual commit id version along the -// one expect by dockerd as set at build time +// Commit holds the Git-commit (SHA1) that a binary was built from, as reported +// in the version-string of external tools, such as containerd, or runC. type Commit struct { - ID string - Expected string + ID string // ID is the actual commit ID of external tool. + Expected string // Expected is the commit ID of external tool expected by dockerd as set at build time. 
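Note: Version above now reports per-component versions (engine, containerd, runC, and so on) alongside the deprecated flat fields. A sketch of reading them; it assumes the client package's existing ServerVersion call and an already-initialized *client.Client:

package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
)

func printComponents(ctx context.Context, cli *client.Client) error {
	v, err := cli.ServerVersion(ctx)
	if err != nil {
		return err
	}
	for _, c := range v.Components {
		fmt.Printf("%s: %s\n", c.Name, c.Version)
	}
	return nil
}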
} // Info contains response of Engine API: @@ -200,6 +180,7 @@ type Info struct { RegistryConfig *registry.ServiceConfig NCPU int MemTotal int64 + GenericResources []swarm.GenericResource DockerRootDir string HTTPProxy string `json:"HttpProxy"` HTTPSProxy string `json:"HttpsProxy"` @@ -276,6 +257,8 @@ type PluginsInfo struct { Network []string // List of Authorization plugins registered Authorization []string + // List of Log plugins registered + Log []string } // ExecStartCheck is a temp struct used by execStart @@ -313,7 +296,7 @@ type Health struct { // ContainerState stores container's running state // it's part of ContainerJSONBase and will return by "inspect" command type ContainerState struct { - Status string + Status string // String representation of the container state. Can be one of "created", "running", "paused", "restarting", "removing", "exited", or "dead" Running bool Paused bool Restarting bool @@ -356,6 +339,7 @@ type ContainerJSONBase struct { Name string RestartCount int Driver string + Platform string MountLabel string ProcessLabel string AppArmorProfile string @@ -429,19 +413,23 @@ type MountPoint struct { // NetworkResource is the body of the "get network" http response message type NetworkResource struct { - Name string // Name is the requested name of the network - ID string `json:"Id"` // ID uniquely identifies a network on a single machine - Created time.Time // Created is the time the network created - Scope string // Scope describes the level at which the network exists (e.g. `global` for cluster-wide or `local` for machine level) - Driver string // Driver is the Driver name used to create the network (e.g. `bridge`, `overlay`) - EnableIPv6 bool // EnableIPv6 represents whether to enable IPv6 - IPAM network.IPAM // IPAM is the network's IP Address Management - Internal bool // Internal represents if the network is used internal only - Attachable bool // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode. - Containers map[string]EndpointResource // Containers contains endpoints belonging to the network - Options map[string]string // Options holds the network specific options to use for when creating the network - Labels map[string]string // Labels holds metadata specific to the network being created - Peers []network.PeerInfo `json:",omitempty"` // List of peer nodes for an overlay network + Name string // Name is the requested name of the network + ID string `json:"Id"` // ID uniquely identifies a network on a single machine + Created time.Time // Created is the time the network created + Scope string // Scope describes the level at which the network exists (e.g. `swarm` for cluster-wide or `local` for machine level) + Driver string // Driver is the Driver name used to create the network (e.g. `bridge`, `overlay`) + EnableIPv6 bool // EnableIPv6 represents whether to enable IPv6 + IPAM network.IPAM // IPAM is the network's IP Address Management + Internal bool // Internal represents if the network is used internal only + Attachable bool // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode. + Ingress bool // Ingress indicates the network is providing the routing-mesh for the swarm cluster. + ConfigFrom network.ConfigReference // ConfigFrom specifies the source which will provide the configuration for this network. + ConfigOnly bool // ConfigOnly networks are place-holder networks for network configurations to be used by other networks. 
ConfigOnly networks cannot be used directly to run containers or services. + Containers map[string]EndpointResource // Containers contains endpoints belonging to the network + Options map[string]string // Options holds the network specific options to use for when creating the network + Labels map[string]string // Labels holds metadata specific to the network being created + Peers []network.PeerInfo `json:",omitempty"` // List of peer nodes for an overlay network + Services map[string]network.ServiceInfo `json:",omitempty"` } // EndpointResource contains network resources allocated and used for a container in a network @@ -455,12 +443,23 @@ type EndpointResource struct { // NetworkCreate is the expected body of the "create network" http request message type NetworkCreate struct { + // Check for networks with duplicate names. + // Network is primarily keyed based on a random ID and not on the name. + // Network name is strictly a user-friendly alias to the network + // which is uniquely identified using ID. + // And there is no guaranteed way to check for duplicates. + // Option CheckDuplicate is there to provide a best effort checking of any networks + // which has the same name but it is not guaranteed to catch all name collisions. CheckDuplicate bool Driver string + Scope string EnableIPv6 bool IPAM *network.IPAM Internal bool Attachable bool + Ingress bool + ConfigOnly bool + ConfigFrom *network.ConfigReference Options map[string]string Labels map[string]string } @@ -489,6 +488,12 @@ type NetworkDisconnect struct { Force bool } +// NetworkInspectOptions holds parameters to inspect network +type NetworkInspectOptions struct { + Scope string + Verbose bool +} + // Checkpoint represents the details of a checkpoint type Checkpoint struct { Name string // Name is the name of the checkpoint @@ -503,10 +508,11 @@ type Runtime struct { // DiskUsage contains response of Engine API: // GET "/system/df" type DiskUsage struct { - LayersSize int64 - Images []*ImageSummary - Containers []*Container - Volumes []*Volume + LayersSize int64 + Images []*ImageSummary + Containers []*Container + Volumes []*Volume + BuilderSize int64 } // ContainersPruneReport contains the response for Engine API: @@ -526,7 +532,13 @@ type VolumesPruneReport struct { // ImagesPruneReport contains the response for Engine API: // POST "/images/prune" type ImagesPruneReport struct { - ImagesDeleted []ImageDelete + ImagesDeleted []ImageDeleteResponseItem + SpaceReclaimed uint64 +} + +// BuildCachePruneReport contains the response for Engine API: +// POST "/build/prune" +type BuildCachePruneReport struct { SpaceReclaimed uint64 } @@ -547,3 +559,29 @@ type SecretCreateResponse struct { type SecretListOptions struct { Filters filters.Args } + +// ConfigCreateResponse contains the information returned to a client +// on the creation of a new config. +type ConfigCreateResponse struct { + // ID is the id of the created config. + ID string +} + +// ConfigListOptions holds parameters to list configs +type ConfigListOptions struct { + Filters filters.Args +} + +// PushResult contains the tag, manifest digest, and manifest size from the +// push. It's used to signal this information to the trust code in the client +// so it can sign the manifest if necessary. 
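Note: NetworkCreate above gains ConfigOnly and ConfigFrom: a config-only network is a placeholder holding IPAM and driver settings that a real network can consume. A hedged sketch against the client API; the network names and subnet are placeholders, and network.ConfigReference is assumed to carry the source network's name:

package main

import (
	"context"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/network"
	"github.com/docker/docker/client"
)

func createConfigNetworks(ctx context.Context, cli *client.Client) error {
	// Placeholder network that only stores configuration.
	if _, err := cli.NetworkCreate(ctx, "mynet-config", types.NetworkCreate{
		ConfigOnly: true,
		IPAM: &network.IPAM{
			Config: []network.IPAMConfig{{Subnet: "10.10.0.0/24"}},
		},
	}); err != nil {
		return err
	}
	// Real network that consumes the stored configuration.
	_, err := cli.NetworkCreate(ctx, "mynet", types.NetworkCreate{
		Driver:     "overlay",
		ConfigFrom: &network.ConfigReference{Network: "mynet-config"},
	})
	return err
}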
+type PushResult struct { + Tag string + Digest string + Size int +} + +// BuildResult contains the image id of a successful build +type BuildResult struct { + ID string +} diff --git a/vendor/github.com/docker/docker/api/types/versions/README.md b/vendor/github.com/docker/docker/api/types/versions/README.md index cdac50a..1ef911e 100644 --- a/vendor/github.com/docker/docker/api/types/versions/README.md +++ b/vendor/github.com/docker/docker/api/types/versions/README.md @@ -1,10 +1,10 @@ -## Legacy API type versions +# Legacy API type versions This package includes types for legacy API versions. The stable version of the API types live in `api/types/*.go`. Consider moving a type here when you need to keep backwards compatibility in the API. This legacy types are organized by the latest API version they appear in. For instance, types in the `v1p19` package are valid for API versions below or equal `1.19`. Types in the `v1p20` package are valid for the API version `1.20`, since the versions below that will use the legacy types in `v1p19`. -### Package name conventions +## Package name conventions The package name convention is to use `v` as a prefix for the version number and `p`(patch) as a separator. We use this nomenclature due to a few restrictions in the Go package name convention: diff --git a/vendor/github.com/docker/docker/api/types/versions/compare.go b/vendor/github.com/docker/docker/api/types/versions/compare.go index 611d4fe..8ccb0aa 100644 --- a/vendor/github.com/docker/docker/api/types/versions/compare.go +++ b/vendor/github.com/docker/docker/api/types/versions/compare.go @@ -1,4 +1,4 @@ -package versions +package versions // import "github.com/docker/docker/api/types/versions" import ( "strconv" diff --git a/vendor/github.com/docker/docker/api/types/volume.go b/vendor/github.com/docker/docker/api/types/volume.go index da4f8eb..b5ee96a 100644 --- a/vendor/github.com/docker/docker/api/types/volume.go +++ b/vendor/github.com/docker/docker/api/types/volume.go @@ -7,6 +7,9 @@ package types // swagger:model Volume type Volume struct { + // Date/Time the volume was created. + CreatedAt string `json:"CreatedAt,omitempty"` + // Name of the volume driver used by the volume. // Required: true Driver string `json:"Driver"` @@ -44,15 +47,23 @@ type Volume struct { UsageData *VolumeUsageData `json:"UsageData,omitempty"` } -// VolumeUsageData volume usage data +// VolumeUsageData Usage details about the volume. This information is used by the +// `GET /system/df` endpoint, and omitted in other endpoints. +// // swagger:model VolumeUsageData type VolumeUsageData struct { - // The number of containers referencing this volume. + // The number of containers referencing this volume. This field + // is set to `-1` if the reference-count is not available. + // // Required: true RefCount int64 `json:"RefCount"` - // The disk space used by the volume (local driver only) + // Amount of disk space used by the volume (in bytes). This information + // is only available for volumes created with the `"local"` volume + // driver. 
For volumes created with other volume drivers, this field + // is set to `-1` ("not available") + // // Required: true Size int64 `json:"Size"` } diff --git a/vendor/github.com/docker/docker/api/types/volume/volumes_create.go b/vendor/github.com/docker/docker/api/types/volume/volume_create.go similarity index 84% rename from vendor/github.com/docker/docker/api/types/volume/volumes_create.go rename to vendor/github.com/docker/docker/api/types/volume/volume_create.go index 679c160..539e9b9 100644 --- a/vendor/github.com/docker/docker/api/types/volume/volumes_create.go +++ b/vendor/github.com/docker/docker/api/types/volume/volume_create.go @@ -4,12 +4,12 @@ package volume // DO NOT EDIT THIS FILE // This file was generated by `swagger generate operation` // -// See hack/swagger-gen.sh +// See hack/generate-swagger-api.sh // ---------------------------------------------------------------------------- -// VolumesCreateBody volumes create body -// swagger:model VolumesCreateBody -type VolumesCreateBody struct { +// VolumeCreateBody +// swagger:model VolumeCreateBody +type VolumeCreateBody struct { // Name of the volume driver to use. // Required: true diff --git a/vendor/github.com/docker/docker/api/types/volume/volumes_list.go b/vendor/github.com/docker/docker/api/types/volume/volume_list.go similarity index 78% rename from vendor/github.com/docker/docker/api/types/volume/volumes_list.go rename to vendor/github.com/docker/docker/api/types/volume/volume_list.go index 7770bcb..1bb279d 100644 --- a/vendor/github.com/docker/docker/api/types/volume/volumes_list.go +++ b/vendor/github.com/docker/docker/api/types/volume/volume_list.go @@ -4,14 +4,14 @@ package volume // DO NOT EDIT THIS FILE // This file was generated by `swagger generate operation` // -// See hack/swagger-gen.sh +// See hack/generate-swagger-api.sh // ---------------------------------------------------------------------------- import "github.com/docker/docker/api/types" -// VolumesListOKBody volumes list o k body -// swagger:model VolumesListOKBody -type VolumesListOKBody struct { +// VolumeListOKBody +// swagger:model VolumeListOKBody +type VolumeListOKBody struct { // List of volumes // Required: true diff --git a/vendor/github.com/docker/docker/client/build_prune.go b/vendor/github.com/docker/docker/client/build_prune.go new file mode 100644 index 0000000..c4772a0 --- /dev/null +++ b/vendor/github.com/docker/docker/client/build_prune.go @@ -0,0 +1,30 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/docker/docker/api/types" +) + +// BuildCachePrune requests the daemon to delete unused cache data +func (cli *Client) BuildCachePrune(ctx context.Context) (*types.BuildCachePruneReport, error) { + if err := cli.NewVersionError("1.31", "build prune"); err != nil { + return nil, err + } + + report := types.BuildCachePruneReport{} + + serverResp, err := cli.post(ctx, "/build/prune", nil, nil, nil) + if err != nil { + return nil, err + } + defer ensureReaderClosed(serverResp) + + if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { + return nil, fmt.Errorf("Error retrieving disk usage: %v", err) + } + + return &report, nil +} diff --git a/vendor/github.com/docker/docker/client/checkpoint_create.go b/vendor/github.com/docker/docker/client/checkpoint_create.go index 0effe49..921024f 100644 --- a/vendor/github.com/docker/docker/client/checkpoint_create.go +++ b/vendor/github.com/docker/docker/client/checkpoint_create.go @@ -1,8 +1,9 @@ -package 
client +package client // import "github.com/docker/docker/client" import ( + "context" + "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // CheckpointCreate creates a checkpoint from the given container with the given name diff --git a/vendor/github.com/docker/docker/client/checkpoint_delete.go b/vendor/github.com/docker/docker/client/checkpoint_delete.go index e6e7558..54f55fa 100644 --- a/vendor/github.com/docker/docker/client/checkpoint_delete.go +++ b/vendor/github.com/docker/docker/client/checkpoint_delete.go @@ -1,10 +1,10 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "net/url" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // CheckpointDelete deletes the checkpoint with the given name from the given container diff --git a/vendor/github.com/docker/docker/client/checkpoint_list.go b/vendor/github.com/docker/docker/client/checkpoint_list.go index 8eb720a..2b73fb5 100644 --- a/vendor/github.com/docker/docker/client/checkpoint_list.go +++ b/vendor/github.com/docker/docker/client/checkpoint_list.go @@ -1,14 +1,14 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "net/url" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) -// CheckpointList returns the volumes configured in the docker host. +// CheckpointList returns the checkpoints of the given container in the docker host func (cli *Client) CheckpointList(ctx context.Context, container string, options types.CheckpointListOptions) ([]types.Checkpoint, error) { var checkpoints []types.Checkpoint @@ -19,7 +19,7 @@ func (cli *Client) CheckpointList(ctx context.Context, container string, options resp, err := cli.get(ctx, "/containers/"+container+"/checkpoints", query, nil) if err != nil { - return checkpoints, err + return checkpoints, wrapResponseError(err, resp, "container", container) } err = json.NewDecoder(resp.body).Decode(&checkpoints) diff --git a/vendor/github.com/docker/docker/client/client.go b/vendor/github.com/docker/docker/client/client.go index a9bdab6..b874b3b 100644 --- a/vendor/github.com/docker/docker/client/client.go +++ b/vendor/github.com/docker/docker/client/client.go @@ -1,10 +1,6 @@ /* Package client is a Go client for the Docker Engine API. -The "docker" command uses this package to communicate with the daemon. It can also -be used by your own Go applications to do anything the command-line interface does -– running containers, pulling images, managing swarms, etc. - For more information about the Engine API, see the documentation: https://docs.docker.com/engine/reference/api/ @@ -43,22 +39,29 @@ For example, to list running containers (the equivalent of "docker ps"): } */ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "fmt" + "net" "net/http" "net/url" "os" + "path" "path/filepath" "strings" + "github.com/docker/docker/api" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions" "github.com/docker/go-connections/sockets" "github.com/docker/go-connections/tlsconfig" + "github.com/pkg/errors" ) -// DefaultVersion is the version of the current stable API -const DefaultVersion string = "1.25" +// ErrRedirect is the error returned by checkRedirect when the request is non-GET. +var ErrRedirect = errors.New("unexpected redirect in response") // Client is the API client that performs all operations // against a docker server. 
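The client.go changes that follow replace the lone NewClient constructor with functional options (FromEnv, WithHost, WithVersion, WithHTTPClient, WithHTTPHeaders) and add API-version negotiation. A rough caller-side sketch, not part of this diff, using only identifiers the patched package exports:

    package main

    import (
        "context"
        "fmt"

        "github.com/docker/docker/client"
    )

    func main() {
        // FromEnv reads DOCKER_HOST, DOCKER_API_VERSION, DOCKER_CERT_PATH
        // and DOCKER_TLS_VERIFY, as documented on FromEnv below.
        cli, err := client.NewClientWithOpts(client.FromEnv)
        if err != nil {
            panic(err)
        }
        defer cli.Close()

        // Unless DOCKER_API_VERSION pinned a version, adopt the daemon's
        // API version when it is lower than the client default.
        cli.NegotiateAPIVersion(context.Background())
        fmt.Println("using API version", cli.ClientVersion())
    }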
@@ -83,13 +86,39 @@ type Client struct { manualOverride bool } +// CheckRedirect specifies the policy for dealing with redirect responses: +// If the request is non-GET return `ErrRedirect`. Otherwise use the last response. +// +// Go 1.8 changes behavior for HTTP redirects (specifically 301, 307, and 308) in the client. +// The Docker client (and by extension docker API client) can be made to send a request +// like POST /containers//start where what would normally be in the name section of the URL is empty. +// This triggers an HTTP 301 from the daemon. +// In go 1.8 this 301 will be converted to a GET request, and ends up getting a 404 from the daemon. +// This behavior change manifests in the client in that before the 301 was not followed and +// the client did not generate an error, but now results in a message like Error response from daemon: page not found. +func CheckRedirect(req *http.Request, via []*http.Request) error { + if via[0].Method == http.MethodGet { + return http.ErrUseLastResponse + } + return ErrRedirect +} + // NewEnvClient initializes a new API client based on environment variables. -// Use DOCKER_HOST to set the url to the docker server. -// Use DOCKER_API_VERSION to set the version of the API to reach, leave empty for latest. -// Use DOCKER_CERT_PATH to load the tls certificates from. -// Use DOCKER_TLS_VERIFY to enable or disable TLS verification, off by default. +// See FromEnv for a list of supported environment variables. +// +// Deprecated: use NewClientWithOpts(FromEnv) func NewEnvClient() (*Client, error) { - var client *http.Client + return NewClientWithOpts(FromEnv) +} + +// FromEnv configures the client with values from environment variables. +// +// Supported environment variables: +// DOCKER_HOST to set the url to the docker server. +// DOCKER_API_VERSION to set the version of the API to reach, leave empty for latest. +// DOCKER_CERT_PATH to load the TLS certificates from. +// DOCKER_TLS_VERIFY to enable or disable TLS verification, off by default. +func FromEnv(c *Client) error { if dockerCertPath := os.Getenv("DOCKER_CERT_PATH"); dockerCertPath != "" { options := tlsconfig.Options{ CAFile: filepath.Join(dockerCertPath, "ca.pem"), @@ -99,93 +128,178 @@ func NewEnvClient() (*Client, error) { } tlsc, err := tlsconfig.Client(options) if err != nil { - return nil, err + return err } - client = &http.Client{ - Transport: &http.Transport{ - TLSClientConfig: tlsc, - }, + c.client = &http.Client{ + Transport: &http.Transport{TLSClientConfig: tlsc}, + CheckRedirect: CheckRedirect, } } - host := os.Getenv("DOCKER_HOST") - if host == "" { - host = DefaultDockerHost + if host := os.Getenv("DOCKER_HOST"); host != "" { + if err := WithHost(host)(c); err != nil { + return err + } } - version := os.Getenv("DOCKER_API_VERSION") - if version == "" { - version = DefaultVersion + + if version := os.Getenv("DOCKER_API_VERSION"); version != "" { + c.version = version + c.manualOverride = true } + return nil +} - cli, err := NewClient(host, version, client, nil) - if err != nil { - return cli, err +// WithTLSClientConfig applies a tls config to the client transport.
+func WithTLSClientConfig(cacertPath, certPath, keyPath string) func(*Client) error { + return func(c *Client) error { + opts := tlsconfig.Options{ + CAFile: cacertPath, + CertFile: certPath, + KeyFile: keyPath, + ExclusiveRootPools: true, + } + config, err := tlsconfig.Client(opts) + if err != nil { + return errors.Wrap(err, "failed to create tls config") + } + if transport, ok := c.client.Transport.(*http.Transport); ok { + transport.TLSClientConfig = config + return nil + } + return errors.Errorf("cannot apply tls config to transport: %T", c.client.Transport) } - if os.Getenv("DOCKER_API_VERSION") != "" { - cli.manualOverride = true +} + +// WithDialer applies the dialer.DialContext to the client transport. This can be +// used to set the Timeout and KeepAlive settings of the client. +func WithDialer(dialer *net.Dialer) func(*Client) error { + return func(c *Client) error { + if transport, ok := c.client.Transport.(*http.Transport); ok { + transport.DialContext = dialer.DialContext + return nil + } + return errors.Errorf("cannot apply dialer to transport: %T", c.client.Transport) } - return cli, nil } -// NewClient initializes a new API client for the given host and API version. -// It uses the given http client as transport. +// WithVersion overrides the client version with the specified one +func WithVersion(version string) func(*Client) error { + return func(c *Client) error { + c.version = version + return nil + } +} + +// WithHost overrides the client host with the specified one. +func WithHost(host string) func(*Client) error { + return func(c *Client) error { + hostURL, err := ParseHostURL(host) + if err != nil { + return err + } + c.host = host + c.proto = hostURL.Scheme + c.addr = hostURL.Host + c.basePath = hostURL.Path + if transport, ok := c.client.Transport.(*http.Transport); ok { + return sockets.ConfigureTransport(transport, c.proto, c.addr) + } + return errors.Errorf("cannot apply host to transport: %T", c.client.Transport) + } +} + +// WithHTTPClient overrides the client http client with the specified one +func WithHTTPClient(client *http.Client) func(*Client) error { + return func(c *Client) error { + if client != nil { + c.client = client + } + return nil + } +} + +// WithHTTPHeaders overrides the client default http headers +func WithHTTPHeaders(headers map[string]string) func(*Client) error { + return func(c *Client) error { + c.customHTTPHeaders = headers + return nil + } +} + +// NewClientWithOpts initializes a new API client with default values. It takes functors +// to modify values when creating it, like `NewClientWithOpts(WithVersion(…))` // It also initializes the custom http headers to add to each request. // // It won't send any version information if the version number is empty. It is // highly recommended that you set a version or your client may break if the // server is upgraded. 
-func NewClient(host string, version string, client *http.Client, httpHeaders map[string]string) (*Client, error) { - proto, addr, basePath, err := ParseHost(host) +func NewClientWithOpts(ops ...func(*Client) error) (*Client, error) { + client, err := defaultHTTPClient(DefaultDockerHost) if err != nil { return nil, err } + c := &Client{ + host: DefaultDockerHost, + version: api.DefaultVersion, + scheme: "http", + client: client, + proto: defaultProto, + addr: defaultAddr, + } - if client != nil { - if _, ok := client.Transport.(*http.Transport); !ok { - return nil, fmt.Errorf("unable to verify TLS configuration, invalid transport %v", client.Transport) - } - } else { - transport := new(http.Transport) - sockets.ConfigureTransport(transport, proto, addr) - client = &http.Client{ - Transport: transport, + for _, op := range ops { + if err := op(c); err != nil { + return nil, err } } - scheme := "http" - tlsConfig := resolveTLSConfig(client.Transport) + if _, ok := c.client.Transport.(http.RoundTripper); !ok { + return nil, fmt.Errorf("unable to verify TLS configuration, invalid transport %v", c.client.Transport) + } + tlsConfig := resolveTLSConfig(c.client.Transport) if tlsConfig != nil { // TODO(stevvooe): This isn't really the right way to write clients in Go. // `NewClient` should probably only take an `*http.Client` and work from there. // Unfortunately, the model of having a host-ish/url-thingy as the connection // string has us confusing protocol and transport layers. We continue doing // this to avoid breaking existing clients but this should be addressed. - scheme = "https" - } - - return &Client{ - scheme: scheme, - host: host, - proto: proto, - addr: addr, - basePath: basePath, - client: client, - version: version, - customHTTPHeaders: httpHeaders, + c.scheme = "https" + } + + return c, nil +} + +func defaultHTTPClient(host string) (*http.Client, error) { + url, err := ParseHostURL(host) + if err != nil { + return nil, err + } + transport := new(http.Transport) + sockets.ConfigureTransport(transport, url.Scheme, url.Host) + return &http.Client{ + Transport: transport, + CheckRedirect: CheckRedirect, }, nil } -// Close ensures that transport.Client is closed -// especially needed while using NewClient with *http.Client = nil -// for example -// client.NewClient("unix:///var/run/docker.sock", nil, "v1.18", map[string]string{"User-Agent": "engine-api-cli-1.0"}) -func (cli *Client) Close() error { +// NewClient initializes a new API client for the given host and API version. +// It uses the given http client as transport. +// It also initializes the custom http headers to add to each request. +// +// It won't send any version information if the version number is empty. It is +// highly recommended that you set a version or your client may break if the +// server is upgraded. 
+// Deprecated: use NewClientWithOpts +func NewClient(host string, version string, client *http.Client, httpHeaders map[string]string) (*Client, error) { + return NewClientWithOpts(WithHost(host), WithVersion(version), WithHTTPClient(client), WithHTTPHeaders(httpHeaders)) +} +// Close the transport used by the client +func (cli *Client) Close() error { if t, ok := cli.client.Transport.(*http.Transport); ok { t.CloseIdleConnections() } - return nil } @@ -195,41 +309,64 @@ func (cli *Client) getAPIPath(p string, query url.Values) string { var apiPath string if cli.version != "" { v := strings.TrimPrefix(cli.version, "v") - apiPath = fmt.Sprintf("%s/v%s%s", cli.basePath, v, p) + apiPath = path.Join(cli.basePath, "/v"+v, p) } else { - apiPath = fmt.Sprintf("%s%s", cli.basePath, p) + apiPath = path.Join(cli.basePath, p) } - - u := &url.URL{ - Path: apiPath, - } - if len(query) > 0 { - u.RawQuery = query.Encode() - } - return u.String() + return (&url.URL{Path: apiPath, RawQuery: query.Encode()}).String() } -// ClientVersion returns the version string associated with this -// instance of the Client. Note that this value can be changed -// via the DOCKER_API_VERSION env var. +// ClientVersion returns the API version used by this client. func (cli *Client) ClientVersion() string { return cli.version } -// UpdateClientVersion updates the version string associated with this -// instance of the Client. -func (cli *Client) UpdateClientVersion(v string) { - if !cli.manualOverride { - cli.version = v +// NegotiateAPIVersion queries the API and updates the version to match the +// API version. Any errors are silently ignored. +func (cli *Client) NegotiateAPIVersion(ctx context.Context) { + ping, _ := cli.Ping(ctx) + cli.NegotiateAPIVersionPing(ping) +} + +// NegotiateAPIVersionPing updates the client version to match the Ping.APIVersion +// if the ping version is less than the default version. +func (cli *Client) NegotiateAPIVersionPing(p types.Ping) { + if cli.manualOverride { + return } + // try the latest version before versioning headers existed + if p.APIVersion == "" { + p.APIVersion = "1.24" + } + + // if the client is not initialized with a version, start with the latest supported version + if cli.version == "" { + cli.version = api.DefaultVersion + } + + // if server version is lower than the client version, downgrade + if versions.LessThan(p.APIVersion, cli.version) { + cli.version = p.APIVersion + } } -// ParseHost verifies that the given host strings is valid. 
-func ParseHost(host string) (string, string, string, error) { +// DaemonHost returns the host address used by the client +func (cli *Client) DaemonHost() string { + return cli.host +} + +// HTTPClient returns a copy of the HTTP client bound to the server +func (cli *Client) HTTPClient() *http.Client { + return &*cli.client +} + +// ParseHostURL parses a url string, validates the string is a host url, and +// returns the parsed URL +func ParseHostURL(host string) (*url.URL, error) { protoAddrParts := strings.SplitN(host, "://", 2) if len(protoAddrParts) == 1 { - return "", "", "", fmt.Errorf("unable to parse docker host `%s`", host) + return nil, fmt.Errorf("unable to parse docker host `%s`", host) } var basePath string @@ -237,10 +374,29 @@ func ParseHost(host string) (string, string, string, error) { if proto == "tcp" { parsed, err := url.Parse("tcp://" + addr) if err != nil { - return "", "", "", err + return nil, err } addr = parsed.Host basePath = parsed.Path } - return proto, addr, basePath, nil + return &url.URL{ + Scheme: proto, + Host: addr, + Path: basePath, + }, nil +} + +// CustomHTTPHeaders returns the custom http headers stored by the client. +func (cli *Client) CustomHTTPHeaders() map[string]string { + m := make(map[string]string) + for k, v := range cli.customHTTPHeaders { + m[k] = v + } + return m +} + +// SetCustomHTTPHeaders that will be set on every HTTP request made by the client. +// Deprecated: use WithHTTPHeaders when creating the client. +func (cli *Client) SetCustomHTTPHeaders(headers map[string]string) { + cli.customHTTPHeaders = headers } diff --git a/vendor/github.com/docker/docker/client/client_unix.go b/vendor/github.com/docker/docker/client/client_unix.go index 89de892..3d24470 100644 --- a/vendor/github.com/docker/docker/client/client_unix.go +++ b/vendor/github.com/docker/docker/client/client_unix.go @@ -1,6 +1,9 @@ -// +build linux freebsd solaris openbsd darwin +// +build linux freebsd openbsd darwin -package client +package client // import "github.com/docker/docker/client" // DefaultDockerHost defines os specific default if DOCKER_HOST is unset const DefaultDockerHost = "unix:///var/run/docker.sock" + +const defaultProto = "unix" +const defaultAddr = "/var/run/docker.sock" diff --git a/vendor/github.com/docker/docker/client/client_windows.go b/vendor/github.com/docker/docker/client/client_windows.go index 07c0c7a..c649e54 100644 --- a/vendor/github.com/docker/docker/client/client_windows.go +++ b/vendor/github.com/docker/docker/client/client_windows.go @@ -1,4 +1,7 @@ -package client +package client // import "github.com/docker/docker/client" // DefaultDockerHost defines os specific default if DOCKER_HOST is unset const DefaultDockerHost = "npipe:////./pipe/docker_engine" + +const defaultProto = "npipe" +const defaultAddr = "//./pipe/docker_engine" diff --git a/vendor/github.com/docker/docker/client/config_create.go b/vendor/github.com/docker/docker/client/config_create.go new file mode 100644 index 0000000..c8b802a --- /dev/null +++ b/vendor/github.com/docker/docker/client/config_create.go @@ -0,0 +1,25 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" +) + +// ConfigCreate creates a new Config. 
+func (cli *Client) ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (types.ConfigCreateResponse, error) { + var response types.ConfigCreateResponse + if err := cli.NewVersionError("1.30", "config create"); err != nil { + return response, err + } + resp, err := cli.post(ctx, "/configs/create", nil, config, nil) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + ensureReaderClosed(resp) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/config_inspect.go b/vendor/github.com/docker/docker/client/config_inspect.go new file mode 100644 index 0000000..4ac566a --- /dev/null +++ b/vendor/github.com/docker/docker/client/config_inspect.go @@ -0,0 +1,36 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "encoding/json" + "io/ioutil" + + "github.com/docker/docker/api/types/swarm" +) + +// ConfigInspectWithRaw returns the config information with raw data +func (cli *Client) ConfigInspectWithRaw(ctx context.Context, id string) (swarm.Config, []byte, error) { + if id == "" { + return swarm.Config{}, nil, objectNotFoundError{object: "config", id: id} + } + if err := cli.NewVersionError("1.30", "config inspect"); err != nil { + return swarm.Config{}, nil, err + } + resp, err := cli.get(ctx, "/configs/"+id, nil, nil) + if err != nil { + return swarm.Config{}, nil, wrapResponseError(err, resp, "config", id) + } + defer ensureReaderClosed(resp) + + body, err := ioutil.ReadAll(resp.body) + if err != nil { + return swarm.Config{}, nil, err + } + + var config swarm.Config + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&config) + + return config, body, err +} diff --git a/vendor/github.com/docker/docker/client/config_list.go b/vendor/github.com/docker/docker/client/config_list.go new file mode 100644 index 0000000..2b9d546 --- /dev/null +++ b/vendor/github.com/docker/docker/client/config_list.go @@ -0,0 +1,38 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" +) + +// ConfigList returns the list of configs. +func (cli *Client) ConfigList(ctx context.Context, options types.ConfigListOptions) ([]swarm.Config, error) { + if err := cli.NewVersionError("1.30", "config list"); err != nil { + return nil, err + } + query := url.Values{} + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToJSON(options.Filters) + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/configs", query, nil) + if err != nil { + return nil, err + } + + var configs []swarm.Config + err = json.NewDecoder(resp.body).Decode(&configs) + ensureReaderClosed(resp) + return configs, err +} diff --git a/vendor/github.com/docker/docker/client/config_remove.go b/vendor/github.com/docker/docker/client/config_remove.go new file mode 100644 index 0000000..a96871e --- /dev/null +++ b/vendor/github.com/docker/docker/client/config_remove.go @@ -0,0 +1,13 @@ +package client // import "github.com/docker/docker/client" + +import "context" + +// ConfigRemove removes a Config. 
+func (cli *Client) ConfigRemove(ctx context.Context, id string) error { + if err := cli.NewVersionError("1.30", "config remove"); err != nil { + return err + } + resp, err := cli.delete(ctx, "/configs/"+id, nil, nil) + ensureReaderClosed(resp) + return wrapResponseError(err, resp, "config", id) +} diff --git a/vendor/github.com/docker/docker/client/config_update.go b/vendor/github.com/docker/docker/client/config_update.go new file mode 100644 index 0000000..39e59cf --- /dev/null +++ b/vendor/github.com/docker/docker/client/config_update.go @@ -0,0 +1,21 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + "strconv" + + "github.com/docker/docker/api/types/swarm" +) + +// ConfigUpdate attempts to update a Config +func (cli *Client) ConfigUpdate(ctx context.Context, id string, version swarm.Version, config swarm.ConfigSpec) error { + if err := cli.NewVersionError("1.30", "config update"); err != nil { + return err + } + query := url.Values{} + query.Set("version", strconv.FormatUint(version.Index, 10)) + resp, err := cli.post(ctx, "/configs/"+id+"/update", query, config, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/container_attach.go b/vendor/github.com/docker/docker/client/container_attach.go index eea4682..88ba1ef 100644 --- a/vendor/github.com/docker/docker/client/container_attach.go +++ b/vendor/github.com/docker/docker/client/container_attach.go @@ -1,16 +1,36 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "net/url" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // ContainerAttach attaches a connection to a container in the server. // It returns a types.HijackedConnection with the hijacked connection // and a reader to get output. It's up to the caller to close // the hijacked connection by calling types.HijackedResponse.Close. +// +// The stream format on the response will be in one of two formats: +// +// If the container is using a TTY, there is only a single stream (stdout), and +// data is copied directly from the container output stream, no extra +// multiplexing or headers. +// +// If the container is *not* using a TTY, streams for stdout and stderr are +// multiplexed. +// The format of the multiplexed stream is as follows: +// +// [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}[]byte{OUTPUT} +// +// STREAM_TYPE can be 1 for stdout and 2 for stderr +// +// SIZE1, SIZE2, SIZE3, and SIZE4 are four bytes of uint32 encoded as big endian. +// This is the size of OUTPUT. +// +// You can use github.com/docker/docker/pkg/stdcopy.StdCopy to demultiplex this +// stream.
func (cli *Client) ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) { query := url.Values{} if options.Stream { diff --git a/vendor/github.com/docker/docker/client/container_commit.go b/vendor/github.com/docker/docker/client/container_commit.go index c766d62..377a2ea 100644 --- a/vendor/github.com/docker/docker/client/container_commit.go +++ b/vendor/github.com/docker/docker/client/container_commit.go @@ -1,31 +1,33 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "errors" "net/url" - distreference "github.com/docker/distribution/reference" + "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/reference" - "golang.org/x/net/context" ) // ContainerCommit applies changes into a container and creates a new tagged image. func (cli *Client) ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.IDResponse, error) { var repository, tag string if options.Reference != "" { - distributionRef, err := distreference.ParseNamed(options.Reference) + ref, err := reference.ParseNormalizedNamed(options.Reference) if err != nil { return types.IDResponse{}, err } - if _, isCanonical := distributionRef.(distreference.Canonical); isCanonical { + if _, isCanonical := ref.(reference.Canonical); isCanonical { return types.IDResponse{}, errors.New("refusing to create a tag with a digest reference") } + ref = reference.TagNameOnly(ref) - tag = reference.GetTagFromNamedRef(distributionRef) - repository = distributionRef.Name() + if tagged, ok := ref.(reference.Tagged); ok { + tag = tagged.Tag() + } + repository = reference.FamiliarName(ref) } query := url.Values{} @@ -37,7 +39,7 @@ func (cli *Client) ContainerCommit(ctx context.Context, container string, option for _, change := range options.Changes { query.Add("changes", change) } - if options.Pause != true { + if !options.Pause { query.Set("pause", "0") } diff --git a/vendor/github.com/docker/docker/client/container_copy.go b/vendor/github.com/docker/docker/client/container_copy.go index 8380eea..d706260 100644 --- a/vendor/github.com/docker/docker/client/container_copy.go +++ b/vendor/github.com/docker/docker/client/container_copy.go @@ -1,6 +1,7 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/base64" "encoding/json" "fmt" @@ -10,8 +11,6 @@ import ( "path/filepath" "strings" - "golang.org/x/net/context" - "github.com/docker/docker/api/types" ) @@ -20,29 +19,34 @@ func (cli *Client) ContainerStatPath(ctx context.Context, containerID, path stri query := url.Values{} query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API. - urlStr := fmt.Sprintf("/containers/%s/archive", containerID) + urlStr := "/containers/" + containerID + "/archive" response, err := cli.head(ctx, urlStr, query, nil) if err != nil { - return types.ContainerPathStat{}, err + return types.ContainerPathStat{}, wrapResponseError(err, response, "container:path", containerID+":"+path) } defer ensureReaderClosed(response) return getContainerPathStatFromHeader(response.header) } // CopyToContainer copies content into the container filesystem. 
-func (cli *Client) CopyToContainer(ctx context.Context, container, path string, content io.Reader, options types.CopyToContainerOptions) error { +// Note that `content` must be a Reader for a TAR archive +func (cli *Client) CopyToContainer(ctx context.Context, containerID, dstPath string, content io.Reader, options types.CopyToContainerOptions) error { query := url.Values{} - query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API. + query.Set("path", filepath.ToSlash(dstPath)) // Normalize the paths used in the API. // Do not allow for an existing directory to be overwritten by a non-directory and vice versa. if !options.AllowOverwriteDirWithFile { query.Set("noOverwriteDirNonDir", "true") } - apiPath := fmt.Sprintf("/containers/%s/archive", container) + if options.CopyUIDGID { + query.Set("copyUIDGID", "true") + } + + apiPath := "/containers/" + containerID + "/archive" response, err := cli.putRaw(ctx, apiPath, query, content, nil) if err != nil { - return err + return wrapResponseError(err, response, "container:path", containerID+":"+dstPath) } defer ensureReaderClosed(response) @@ -54,15 +58,15 @@ func (cli *Client) CopyToContainer(ctx context.Context, container, path string, } // CopyFromContainer gets the content from the container and returns it as a Reader -// to manipulate it in the host. It's up to the caller to close the reader. -func (cli *Client) CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) { +// for a TAR archive to manipulate it in the host. It's up to the caller to close the reader. +func (cli *Client) CopyFromContainer(ctx context.Context, containerID, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) { query := make(url.Values, 1) query.Set("path", filepath.ToSlash(srcPath)) // Normalize the paths used in the API. 
- apiPath := fmt.Sprintf("/containers/%s/archive", container) + apiPath := "/containers/" + containerID + "/archive" response, err := cli.get(ctx, apiPath, query, nil) if err != nil { - return nil, types.ContainerPathStat{}, err + return nil, types.ContainerPathStat{}, wrapResponseError(err, response, "container:path", containerID+":"+srcPath) } if response.statusCode != http.StatusOK { diff --git a/vendor/github.com/docker/docker/client/container_create.go b/vendor/github.com/docker/docker/client/container_create.go index 9f627aa..d269a61 100644 --- a/vendor/github.com/docker/docker/client/container_create.go +++ b/vendor/github.com/docker/docker/client/container_create.go @@ -1,13 +1,14 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "net/url" "strings" "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/network" - "golang.org/x/net/context" + "github.com/docker/docker/api/types/versions" ) type configWrapper struct { @@ -25,6 +26,11 @@ func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config return response, err } + // When using API 1.24 and under, the client is responsible for removing the container + if hostConfig != nil && versions.LessThan(cli.ClientVersion(), "1.25") { + hostConfig.AutoRemove = false + } + query := url.Values{} if containerName != "" { query.Set("name", containerName) @@ -39,7 +45,7 @@ func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config serverResp, err := cli.post(ctx, "/containers/create", query, body, nil) if err != nil { if serverResp.statusCode == 404 && strings.Contains(err.Error(), "No such image") { - return response, imageNotFoundError{config.Image} + return response, objectNotFoundError{object: "image", id: config.Image} } return response, err } diff --git a/vendor/github.com/docker/docker/client/container_diff.go b/vendor/github.com/docker/docker/client/container_diff.go index 1e3e554..3b7c90c 100644 --- a/vendor/github.com/docker/docker/client/container_diff.go +++ b/vendor/github.com/docker/docker/client/container_diff.go @@ -1,16 +1,16 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "net/url" - "github.com/docker/docker/api/types" - "golang.org/x/net/context" + "github.com/docker/docker/api/types/container" ) // ContainerDiff shows differences in a container filesystem since it was started. -func (cli *Client) ContainerDiff(ctx context.Context, containerID string) ([]types.ContainerChange, error) { - var changes []types.ContainerChange +func (cli *Client) ContainerDiff(ctx context.Context, containerID string) ([]container.ContainerChangeResponseItem, error) { + var changes []container.ContainerChangeResponseItem serverResp, err := cli.get(ctx, "/containers/"+containerID+"/changes", url.Values{}, nil) if err != nil { diff --git a/vendor/github.com/docker/docker/client/container_exec.go b/vendor/github.com/docker/docker/client/container_exec.go index 0665c54..535536b 100644 --- a/vendor/github.com/docker/docker/client/container_exec.go +++ b/vendor/github.com/docker/docker/client/container_exec.go @@ -1,10 +1,10 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // ContainerExecCreate creates a new exec configuration to run an exec process. 
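Two behavior changes in the container_create.go hunk above are easy to miss: on negotiated API versions below 1.25 the client now clears hostConfig.AutoRemove itself (older daemons cannot honor it, so removal becomes the caller's job), and a 404 "No such image" now surfaces as the generic objectNotFoundError. A hedged sketch of caller-side handling; the image, command, and container name are illustrative, assuming the four-argument ContainerCreate signature of this vendored snapshot:

    package main

    import (
        "context"
        "fmt"

        "github.com/docker/docker/api/types/container"
        "github.com/docker/docker/client"
    )

    func main() {
        cli, err := client.NewClientWithOpts(client.FromEnv)
        if err != nil {
            panic(err)
        }
        cli.NegotiateAPIVersion(context.Background())

        // Image, command, and name are illustrative values.
        resp, err := cli.ContainerCreate(context.Background(),
            &container.Config{Image: "alpine:3.7", Cmd: []string{"true"}},
            &container.HostConfig{AutoRemove: true}, // cleared by the client on API < 1.25
            nil, "demo")
        if client.IsErrNotFound(err) {
            // The daemon's 404 is now a typed not-found error.
            fmt.Println("image missing; pull it and retry")
            return
        }
        if err != nil {
            panic(err)
        }
        fmt.Println("created", resp.ID)
    }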
@@ -35,7 +35,7 @@ func (cli *Client) ContainerExecStart(ctx context.Context, execID string, config // It returns a types.HijackedConnection with the hijacked connection // and a reader to get output. It's up to the caller to close // the hijacked connection by calling types.HijackedResponse.Close. -func (cli *Client) ContainerExecAttach(ctx context.Context, execID string, config types.ExecConfig) (types.HijackedResponse, error) { +func (cli *Client) ContainerExecAttach(ctx context.Context, execID string, config types.ExecStartCheck) (types.HijackedResponse, error) { headers := map[string][]string{"Content-Type": {"application/json"}} return cli.postHijacked(ctx, "/exec/"+execID+"/start", nil, config, headers) } diff --git a/vendor/github.com/docker/docker/client/container_export.go b/vendor/github.com/docker/docker/client/container_export.go index 52194f3..d0c0a5c 100644 --- a/vendor/github.com/docker/docker/client/container_export.go +++ b/vendor/github.com/docker/docker/client/container_export.go @@ -1,10 +1,9 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "io" "net/url" - - "golang.org/x/net/context" ) // ContainerExport retrieves the raw contents of a container diff --git a/vendor/github.com/docker/docker/client/container_inspect.go b/vendor/github.com/docker/docker/client/container_inspect.go index 17f1809..f453064 100644 --- a/vendor/github.com/docker/docker/client/container_inspect.go +++ b/vendor/github.com/docker/docker/client/container_inspect.go @@ -1,24 +1,23 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "encoding/json" "io/ioutil" - "net/http" "net/url" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // ContainerInspect returns the container information. func (cli *Client) ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error) { + if containerID == "" { + return types.ContainerJSON{}, objectNotFoundError{object: "container", id: containerID} + } serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", nil, nil) if err != nil { - if serverResp.statusCode == http.StatusNotFound { - return types.ContainerJSON{}, containerNotFoundError{containerID} - } - return types.ContainerJSON{}, err + return types.ContainerJSON{}, wrapResponseError(err, serverResp, "container", containerID) } var response types.ContainerJSON @@ -29,16 +28,16 @@ func (cli *Client) ContainerInspect(ctx context.Context, containerID string) (ty // ContainerInspectWithRaw returns the container information and its raw representation.
func (cli *Client) ContainerInspectWithRaw(ctx context.Context, containerID string, getSize bool) (types.ContainerJSON, []byte, error) { + if containerID == "" { + return types.ContainerJSON{}, nil, objectNotFoundError{object: "container", id: containerID} + } query := url.Values{} if getSize { query.Set("size", "1") } serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", query, nil) if err != nil { - if serverResp.statusCode == http.StatusNotFound { - return types.ContainerJSON{}, nil, containerNotFoundError{containerID} - } - return types.ContainerJSON{}, nil, err + return types.ContainerJSON{}, nil, wrapResponseError(err, serverResp, "container", containerID) } defer ensureReaderClosed(serverResp) diff --git a/vendor/github.com/docker/docker/client/container_kill.go b/vendor/github.com/docker/docker/client/container_kill.go index 29f80c7..4d6f1d2 100644 --- a/vendor/github.com/docker/docker/client/container_kill.go +++ b/vendor/github.com/docker/docker/client/container_kill.go @@ -1,9 +1,8 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "net/url" - - "golang.org/x/net/context" ) // ContainerKill terminates the container process but does not remove the container from the docker host. diff --git a/vendor/github.com/docker/docker/client/container_list.go b/vendor/github.com/docker/docker/client/container_list.go index 4398912..9c218e2 100644 --- a/vendor/github.com/docker/docker/client/container_list.go +++ b/vendor/github.com/docker/docker/client/container_list.go @@ -1,13 +1,13 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "net/url" "strconv" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" - "golang.org/x/net/context" ) // ContainerList returns the list of containers in the docker host. diff --git a/vendor/github.com/docker/docker/client/container_logs.go b/vendor/github.com/docker/docker/client/container_logs.go index 69056b6..5b6541f 100644 --- a/vendor/github.com/docker/docker/client/container_logs.go +++ b/vendor/github.com/docker/docker/client/container_logs.go @@ -1,18 +1,38 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "io" "net/url" "time" - "golang.org/x/net/context" - "github.com/docker/docker/api/types" timetypes "github.com/docker/docker/api/types/time" + "github.com/pkg/errors" ) // ContainerLogs returns the logs generated by a container in an io.ReadCloser. // It's up to the caller to close the stream. +// +// The stream format on the response will be in one of two formats: +// +// If the container is using a TTY, there is only a single stream (stdout), and +// data is copied directly from the container output stream, no extra +// multiplexing or headers. +// +// If the container is *not* using a TTY, streams for stdout and stderr are +// multiplexed. +// The format of the multiplexed stream is as follows: +// +// [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}[]byte{OUTPUT} +// +// STREAM_TYPE can be 1 for stdout and 2 for stderr +// +// SIZE1, SIZE2, SIZE3, and SIZE4 are four bytes of uint32 encoded as big endian. +// This is the size of OUTPUT. +// +// You can use github.com/docker/docker/pkg/stdcopy.StdCopy to demultiplex this +// stream. 
func (cli *Client) ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error) { query := url.Values{} if options.ShowStdout { @@ -26,11 +46,19 @@ func (cli *Client) ContainerLogs(ctx context.Context, container string, options if options.Since != "" { ts, err := timetypes.GetTimestamp(options.Since, time.Now()) if err != nil { - return nil, err + return nil, errors.Wrap(err, `invalid value for "since"`) } query.Set("since", ts) } + if options.Until != "" { + ts, err := timetypes.GetTimestamp(options.Until, time.Now()) + if err != nil { + return nil, errors.Wrap(err, `invalid value for "until"`) + } + query.Set("until", ts) + } + if options.Timestamps { query.Set("timestamps", "1") } @@ -46,7 +74,7 @@ func (cli *Client) ContainerLogs(ctx context.Context, container string, options resp, err := cli.get(ctx, "/containers/"+container+"/logs", query, nil) if err != nil { - return nil, err + return nil, wrapResponseError(err, resp, "container", container) } return resp.body, nil } diff --git a/vendor/github.com/docker/docker/client/container_pause.go b/vendor/github.com/docker/docker/client/container_pause.go index 412067a..5e7271a 100644 --- a/vendor/github.com/docker/docker/client/container_pause.go +++ b/vendor/github.com/docker/docker/client/container_pause.go @@ -1,6 +1,6 @@ -package client +package client // import "github.com/docker/docker/client" -import "golang.org/x/net/context" +import "context" // ContainerPause pauses the main process of a given container without terminating it. func (cli *Client) ContainerPause(ctx context.Context, containerID string) error { diff --git a/vendor/github.com/docker/docker/client/container_prune.go b/vendor/github.com/docker/docker/client/container_prune.go index b582170..14f88d9 100644 --- a/vendor/github.com/docker/docker/client/container_prune.go +++ b/vendor/github.com/docker/docker/client/container_prune.go @@ -1,12 +1,12 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "fmt" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" - "golang.org/x/net/context" ) // ContainersPrune requests the daemon to delete unused data diff --git a/vendor/github.com/docker/docker/client/container_remove.go b/vendor/github.com/docker/docker/client/container_remove.go index 3a79590..ab4cfc1 100644 --- a/vendor/github.com/docker/docker/client/container_remove.go +++ b/vendor/github.com/docker/docker/client/container_remove.go @@ -1,10 +1,10 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "net/url" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // ContainerRemove kills and removes a container from the docker host. 
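Besides the new "until" filter, the ContainerLogs hunk above documents the frame format on the returned reader. For a non-TTY container the caller is expected to split the multiplexed stream with pkg/stdcopy; a minimal sketch, with the container name and time window purely illustrative:

    package main

    import (
        "context"
        "os"

        "github.com/docker/docker/api/types"
        "github.com/docker/docker/client"
        "github.com/docker/docker/pkg/stdcopy"
    )

    func main() {
        cli, err := client.NewClientWithOpts(client.FromEnv)
        if err != nil {
            panic(err)
        }
        rc, err := cli.ContainerLogs(context.Background(), "demo",
            types.ContainerLogsOptions{ShowStdout: true, ShowStderr: true, Until: "2m"})
        if err != nil {
            panic(err) // a daemon 404 here is now wrapped, so client.IsErrNotFound(err) also works
        }
        defer rc.Close()
        // StdCopy demultiplexes the stdout (type 1) and stderr (type 2) frames
        // described in the comment added above ContainerLogs.
        if _, err := stdcopy.StdCopy(os.Stdout, os.Stderr, rc); err != nil {
            panic(err)
        }
    }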
@@ -23,5 +23,5 @@ func (cli *Client) ContainerRemove(ctx context.Context, containerID string, opti resp, err := cli.delete(ctx, "/containers/"+containerID, query, nil) ensureReaderClosed(resp) - return err + return wrapResponseError(err, resp, "container", containerID) } diff --git a/vendor/github.com/docker/docker/client/container_rename.go b/vendor/github.com/docker/docker/client/container_rename.go index 0e718da..240fdf5 100644 --- a/vendor/github.com/docker/docker/client/container_rename.go +++ b/vendor/github.com/docker/docker/client/container_rename.go @@ -1,9 +1,8 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "net/url" - - "golang.org/x/net/context" ) // ContainerRename changes the name of a given container. diff --git a/vendor/github.com/docker/docker/client/container_resize.go b/vendor/github.com/docker/docker/client/container_resize.go index 66c3cc1..a9d4c0c 100644 --- a/vendor/github.com/docker/docker/client/container_resize.go +++ b/vendor/github.com/docker/docker/client/container_resize.go @@ -1,11 +1,11 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "net/url" "strconv" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // ContainerResize changes the size of the tty for a container. diff --git a/vendor/github.com/docker/docker/client/container_restart.go b/vendor/github.com/docker/docker/client/container_restart.go index 74d7455..41e4219 100644 --- a/vendor/github.com/docker/docker/client/container_restart.go +++ b/vendor/github.com/docker/docker/client/container_restart.go @@ -1,11 +1,11 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "net/url" "time" timetypes "github.com/docker/docker/api/types/time" - "golang.org/x/net/context" ) // ContainerRestart stops and starts a container again. diff --git a/vendor/github.com/docker/docker/client/container_start.go b/vendor/github.com/docker/docker/client/container_start.go index b1f08de..c2e0b15 100644 --- a/vendor/github.com/docker/docker/client/container_start.go +++ b/vendor/github.com/docker/docker/client/container_start.go @@ -1,10 +1,9 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "net/url" - "golang.org/x/net/context" - "github.com/docker/docker/api/types" ) diff --git a/vendor/github.com/docker/docker/client/container_stats.go b/vendor/github.com/docker/docker/client/container_stats.go index 4758c66..6ef44c7 100644 --- a/vendor/github.com/docker/docker/client/container_stats.go +++ b/vendor/github.com/docker/docker/client/container_stats.go @@ -1,10 +1,10 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "net/url" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // ContainerStats returns near realtime stats for a given container. diff --git a/vendor/github.com/docker/docker/client/container_stop.go b/vendor/github.com/docker/docker/client/container_stop.go index b5418ae..5d7f160 100644 --- a/vendor/github.com/docker/docker/client/container_stop.go +++ b/vendor/github.com/docker/docker/client/container_stop.go @@ -1,11 +1,11 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "net/url" "time" timetypes "github.com/docker/docker/api/types/time" - "golang.org/x/net/context" ) // ContainerStop stops a container without terminating the process. 
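Note the pattern the container_remove.go hunk above establishes: the error is funneled through wrapResponseError, so a daemon-side 404 comes back as the package's objectNotFoundError and can be probed with client.IsErrNotFound. A sketch of idempotent cleanup built on that; the helper and the container name are hypothetical, and Force is just one plausible option:

    package main

    import (
        "context"

        "github.com/docker/docker/api/types"
        "github.com/docker/docker/client"
    )

    // removeIfPresent is a hypothetical helper, not part of this diff.
    func removeIfPresent(ctx context.Context, cli *client.Client, id string) error {
        err := cli.ContainerRemove(ctx, id, types.ContainerRemoveOptions{Force: true})
        if client.IsErrNotFound(err) {
            return nil // the daemon answered 404; nothing left to remove
        }
        return err
    }

    func main() {
        cli, err := client.NewClientWithOpts(client.FromEnv)
        if err != nil {
            panic(err)
        }
        // "demo" is an illustrative container name.
        if err := removeIfPresent(context.Background(), cli, "demo"); err != nil {
            panic(err)
        }
    }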
diff --git a/vendor/github.com/docker/docker/client/container_top.go b/vendor/github.com/docker/docker/client/container_top.go index 4e7270e..9c9fce7 100644 --- a/vendor/github.com/docker/docker/client/container_top.go +++ b/vendor/github.com/docker/docker/client/container_top.go @@ -1,17 +1,17 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "net/url" "strings" - "github.com/docker/docker/api/types" - "golang.org/x/net/context" + "github.com/docker/docker/api/types/container" ) // ContainerTop shows process information from within a container. -func (cli *Client) ContainerTop(ctx context.Context, containerID string, arguments []string) (types.ContainerProcessList, error) { - var response types.ContainerProcessList +func (cli *Client) ContainerTop(ctx context.Context, containerID string, arguments []string) (container.ContainerTopOKBody, error) { + var response container.ContainerTopOKBody query := url.Values{} if len(arguments) > 0 { query.Set("ps_args", strings.Join(arguments, " ")) diff --git a/vendor/github.com/docker/docker/client/container_unpause.go b/vendor/github.com/docker/docker/client/container_unpause.go index 5c76211..1d8f873 100644 --- a/vendor/github.com/docker/docker/client/container_unpause.go +++ b/vendor/github.com/docker/docker/client/container_unpause.go @@ -1,6 +1,6 @@ -package client +package client // import "github.com/docker/docker/client" -import "golang.org/x/net/context" +import "context" // ContainerUnpause resumes the process execution within a container func (cli *Client) ContainerUnpause(ctx context.Context, containerID string) error { diff --git a/vendor/github.com/docker/docker/client/container_update.go b/vendor/github.com/docker/docker/client/container_update.go index 5082f22..14e7f23 100644 --- a/vendor/github.com/docker/docker/client/container_update.go +++ b/vendor/github.com/docker/docker/client/container_update.go @@ -1,10 +1,10 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "github.com/docker/docker/api/types/container" - "golang.org/x/net/context" ) // ContainerUpdate updates resources of a container diff --git a/vendor/github.com/docker/docker/client/container_wait.go b/vendor/github.com/docker/docker/client/container_wait.go index 93212c7..6ab8c1d 100644 --- a/vendor/github.com/docker/docker/client/container_wait.go +++ b/vendor/github.com/docker/docker/client/container_wait.go @@ -1,26 +1,83 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" - - "golang.org/x/net/context" + "net/url" "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/versions" ) -// ContainerWait pauses execution until a container exits. -// It returns the API status code as response of its readiness. -func (cli *Client) ContainerWait(ctx context.Context, containerID string) (int64, error) { - resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", nil, nil, nil) - if err != nil { - return -1, err +// ContainerWait waits until the specified container is in a certain state +// indicated by the given condition, either "not-running" (default), +// "next-exit", or "removed". +// +// If this client's API version is before 1.30, condition is ignored and +// ContainerWait will return immediately with the two channels, as the server +// will wait as if the condition were "not-running". 
+// +// If this client's API version is at least 1.30, ContainerWait blocks until +// the request has been acknowledged by the server (with a response header), +// then returns two channels on which the caller can wait for the exit status +// of the container or an error if there was a problem either beginning the +// wait request or in getting the response. This allows the caller to +// synchronize ContainerWait with other calls, such as specifying a +// "next-exit" condition before issuing a ContainerStart request. +func (cli *Client) ContainerWait(ctx context.Context, containerID string, condition container.WaitCondition) (<-chan container.ContainerWaitOKBody, <-chan error) { + if versions.LessThan(cli.ClientVersion(), "1.30") { + return cli.legacyContainerWait(ctx, containerID) } - defer ensureReaderClosed(resp) - var res container.ContainerWaitOKBody - if err := json.NewDecoder(resp.body).Decode(&res); err != nil { - return -1, err + resultC := make(chan container.ContainerWaitOKBody) + errC := make(chan error, 1) + + query := url.Values{} + query.Set("condition", string(condition)) + + resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", query, nil, nil) + if err != nil { + defer ensureReaderClosed(resp) + errC <- err + return resultC, errC } - return res.StatusCode, nil + go func() { + defer ensureReaderClosed(resp) + var res container.ContainerWaitOKBody + if err := json.NewDecoder(resp.body).Decode(&res); err != nil { + errC <- err + return + } + + resultC <- res + }() + + return resultC, errC +} + +// legacyContainerWait returns immediately and doesn't have an option to wait +// until the container is removed. +func (cli *Client) legacyContainerWait(ctx context.Context, containerID string) (<-chan container.ContainerWaitOKBody, <-chan error) { + resultC := make(chan container.ContainerWaitOKBody) + errC := make(chan error) + + go func() { + resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", nil, nil, nil) + if err != nil { + errC <- err + return + } + defer ensureReaderClosed(resp) + + var res container.ContainerWaitOKBody + if err := json.NewDecoder(resp.body).Decode(&res); err != nil { + errC <- err + return + } + + resultC <- res + }() + + return resultC, errC } diff --git a/vendor/github.com/docker/docker/client/disk_usage.go b/vendor/github.com/docker/docker/client/disk_usage.go index 03c80b3..8eb30eb 100644 --- a/vendor/github.com/docker/docker/client/disk_usage.go +++ b/vendor/github.com/docker/docker/client/disk_usage.go @@ -1,11 +1,11 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "fmt" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // DiskUsage requests the current data usage from the daemon diff --git a/vendor/github.com/docker/docker/client/distribution_inspect.go b/vendor/github.com/docker/docker/client/distribution_inspect.go new file mode 100644 index 0000000..7245bbe --- /dev/null +++ b/vendor/github.com/docker/docker/client/distribution_inspect.go @@ -0,0 +1,38 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + registrytypes "github.com/docker/docker/api/types/registry" +) + +// DistributionInspect returns the image digest with full Manifest +func (cli *Client) DistributionInspect(ctx context.Context, image, encodedRegistryAuth string) (registrytypes.DistributionInspect, error) { + // Contact the registry to retrieve digest and platform information + var distributionInspect 
registrytypes.DistributionInspect + if image == "" { + return distributionInspect, objectNotFoundError{object: "distribution", id: image} + } + + if err := cli.NewVersionError("1.30", "distribution inspect"); err != nil { + return distributionInspect, err + } + var headers map[string][]string + + if encodedRegistryAuth != "" { + headers = map[string][]string{ + "X-Registry-Auth": {encodedRegistryAuth}, + } + } + + resp, err := cli.get(ctx, "/distribution/"+image+"/json", url.Values{}, headers) + if err != nil { + return distributionInspect, err + } + + err = json.NewDecoder(resp.body).Decode(&distributionInspect) + ensureReaderClosed(resp) + return distributionInspect, err +} diff --git a/vendor/github.com/docker/docker/client/errors.go b/vendor/github.com/docker/docker/client/errors.go index bf6923f..05c1246 100644 --- a/vendor/github.com/docker/docker/client/errors.go +++ b/vendor/github.com/docker/docker/client/errors.go @@ -1,8 +1,10 @@ -package client +package client // import "github.com/docker/docker/client" import ( "fmt" + "net/http" + "github.com/docker/docker/api/types/versions" "github.com/pkg/errors" ) @@ -36,95 +38,37 @@ type notFound interface { NotFound() bool // Is the error a NotFound error } -// IsErrNotFound returns true if the error is caused with an -// object (image, container, network, volume, …) is not found in the docker host. +// IsErrNotFound returns true if the error is a NotFound error, which is returned +// by the API when some object is not found. func IsErrNotFound(err error) bool { te, ok := err.(notFound) return ok && te.NotFound() } -// imageNotFoundError implements an error returned when an image is not in the docker host. -type imageNotFoundError struct { - imageID string +type objectNotFoundError struct { + object string + id string } -// NotFound indicates that this error type is of NotFound -func (e imageNotFoundError) NotFound() bool { +func (e objectNotFoundError) NotFound() bool { return true } -// Error returns a string representation of an imageNotFoundError -func (e imageNotFoundError) Error() string { - return fmt.Sprintf("Error: No such image: %s", e.imageID) -} - -// IsErrImageNotFound returns true if the error is caused -// when an image is not found in the docker host. -func IsErrImageNotFound(err error) bool { - return IsErrNotFound(err) -} - -// containerNotFoundError implements an error returned when a container is not in the docker host. -type containerNotFoundError struct { - containerID string -} - -// NotFound indicates that this error type is of NotFound -func (e containerNotFoundError) NotFound() bool { - return true -} - -// Error returns a string representation of a containerNotFoundError -func (e containerNotFoundError) Error() string { - return fmt.Sprintf("Error: No such container: %s", e.containerID) -} - -// IsErrContainerNotFound returns true if the error is caused -// when a container is not found in the docker host. -func IsErrContainerNotFound(err error) bool { - return IsErrNotFound(err) -} - -// networkNotFoundError implements an error returned when a network is not in the docker host. 
-type networkNotFoundError struct { - networkID string -} - -// NotFound indicates that this error type is of NotFound -func (e networkNotFoundError) NotFound() bool { - return true -} - -// Error returns a string representation of a networkNotFoundError -func (e networkNotFoundError) Error() string { - return fmt.Sprintf("Error: No such network: %s", e.networkID) -} - -// IsErrNetworkNotFound returns true if the error is caused -// when a network is not found in the docker host. -func IsErrNetworkNotFound(err error) bool { - return IsErrNotFound(err) +func (e objectNotFoundError) Error() string { + return fmt.Sprintf("Error: No such %s: %s", e.object, e.id) } -// volumeNotFoundError implements an error returned when a volume is not in the docker host. -type volumeNotFoundError struct { - volumeID string -} - -// NotFound indicates that this error type is of NotFound -func (e volumeNotFoundError) NotFound() bool { - return true -} - -// Error returns a string representation of a volumeNotFoundError -func (e volumeNotFoundError) Error() string { - return fmt.Sprintf("Error: No such volume: %s", e.volumeID) -} - -// IsErrVolumeNotFound returns true if the error is caused -// when a volume is not found in the docker host. -func IsErrVolumeNotFound(err error) bool { - return IsErrNotFound(err) +func wrapResponseError(err error, resp serverResponse, object, id string) error { + switch { + case err == nil: + return nil + case resp.statusCode == http.StatusNotFound: + return objectNotFoundError{object: object, id: id} + case resp.statusCode == http.StatusNotImplemented: + return notImplementedError{message: err.Error()} + default: + return err + } } // unauthorizedError represents an authorization error in a remote registry. @@ -144,72 +88,6 @@ func IsErrUnauthorized(err error) bool { return ok } -// nodeNotFoundError implements an error returned when a node is not found. -type nodeNotFoundError struct { - nodeID string -} - -// Error returns a string representation of a nodeNotFoundError -func (e nodeNotFoundError) Error() string { - return fmt.Sprintf("Error: No such node: %s", e.nodeID) -} - -// NotFound indicates that this error type is of NotFound -func (e nodeNotFoundError) NotFound() bool { - return true -} - -// IsErrNodeNotFound returns true if the error is caused -// when a node is not found. -func IsErrNodeNotFound(err error) bool { - _, ok := err.(nodeNotFoundError) - return ok -} - -// serviceNotFoundError implements an error returned when a service is not found. -type serviceNotFoundError struct { - serviceID string -} - -// Error returns a string representation of a serviceNotFoundError -func (e serviceNotFoundError) Error() string { - return fmt.Sprintf("Error: No such service: %s", e.serviceID) -} - -// NotFound indicates that this error type is of NotFound -func (e serviceNotFoundError) NotFound() bool { - return true -} - -// IsErrServiceNotFound returns true if the error is caused -// when a service is not found. -func IsErrServiceNotFound(err error) bool { - _, ok := err.(serviceNotFoundError) - return ok -} - -// taskNotFoundError implements an error returned when a task is not found. 
-type taskNotFoundError struct { - taskID string -} - -// Error returns a string representation of a taskNotFoundError -func (e taskNotFoundError) Error() string { - return fmt.Sprintf("Error: No such task: %s", e.taskID) -} - -// NotFound indicates that this error type is of NotFound -func (e taskNotFoundError) NotFound() bool { - return true -} - -// IsErrTaskNotFound returns true if the error is caused -// when a task is not found. -func IsErrTaskNotFound(err error) bool { - _, ok := err.(taskNotFoundError) - return ok -} - type pluginPermissionDenied struct { name string } @@ -225,54 +103,31 @@ func IsErrPluginPermissionDenied(err error) bool { return ok } -// NewVersionError returns an error if the APIVersion required -// if less than the current supported version -func (cli *Client) NewVersionError(APIrequired, feature string) error { - if versions.LessThan(cli.version, APIrequired) { - return fmt.Errorf("%q requires API version %s, but the Docker server is version %s", feature, APIrequired, cli.version) - } - return nil -} - -// secretNotFoundError implements an error returned when a secret is not found. -type secretNotFoundError struct { - name string -} - -// Error returns a string representation of a secretNotFoundError -func (e secretNotFoundError) Error() string { - return fmt.Sprintf("Error: no such secret: %s", e.name) -} - -// NoFound indicates that this error type is of NotFound -func (e secretNotFoundError) NotFound() bool { - return true +type notImplementedError struct { + message string } -// IsErrSecretNotFound returns true if the error is caused -// when a secret is not found. -func IsErrSecretNotFound(err error) bool { - _, ok := err.(secretNotFoundError) - return ok -} - -// pluginNotFoundError implements an error returned when a plugin is not in the docker host. -type pluginNotFoundError struct { - name string +func (e notImplementedError) Error() string { + return e.message } -// NotFound indicates that this error type is of NotFound -func (e pluginNotFoundError) NotFound() bool { +func (e notImplementedError) NotImplemented() bool { return true } -// Error returns a string representation of a pluginNotFoundError -func (e pluginNotFoundError) Error() string { - return fmt.Sprintf("Error: No such plugin: %s", e.name) +// IsErrNotImplemented returns true if the error is a NotImplemented error. +// This is returned by the API when a requested feature has not been +// implemented. +func IsErrNotImplemented(err error) bool { + te, ok := err.(notImplementedError) + return ok && te.NotImplemented() } -// IsErrPluginNotFound returns true if the error is caused -// when a plugin is not found in the docker host. 
-func IsErrPluginNotFound(err error) bool { - return IsErrNotFound(err) +// NewVersionError returns an error if the APIVersion required +// if less than the current supported version +func (cli *Client) NewVersionError(APIrequired, feature string) error { + if cli.version != "" && versions.LessThan(cli.version, APIrequired) { + return fmt.Errorf("%q requires API version %s, but the Docker daemon API version is %s", feature, APIrequired, cli.version) + } + return nil } diff --git a/vendor/github.com/docker/docker/client/events.go b/vendor/github.com/docker/docker/client/events.go index af47aef..6e56538 100644 --- a/vendor/github.com/docker/docker/client/events.go +++ b/vendor/github.com/docker/docker/client/events.go @@ -1,12 +1,11 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "net/url" "time" - "golang.org/x/net/context" - "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/events" "github.com/docker/docker/api/types/filters" diff --git a/vendor/github.com/docker/docker/client/hijack.go b/vendor/github.com/docker/docker/client/hijack.go index 74c53f5..2b14831 100644 --- a/vendor/github.com/docker/docker/client/hijack.go +++ b/vendor/github.com/docker/docker/client/hijack.go @@ -1,8 +1,9 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "bufio" + "context" "crypto/tls" - "errors" "fmt" "net" "net/http" @@ -12,9 +13,8 @@ import ( "time" "github.com/docker/docker/api/types" - "github.com/docker/docker/pkg/tlsconfig" "github.com/docker/go-connections/sockets" - "golang.org/x/net/context" + "github.com/pkg/errors" ) // tlsClientCon holds tls information and a dialed connection. @@ -46,37 +46,12 @@ func (cli *Client) postHijacked(ctx context.Context, path string, query url.Valu } req = cli.addHeaders(req, headers) - req.Host = cli.addr - req.Header.Set("Connection", "Upgrade") - req.Header.Set("Upgrade", "tcp") - - conn, err := dial(cli.proto, cli.addr, resolveTLSConfig(cli.client.Transport)) + conn, err := cli.setupHijackConn(req, "tcp") if err != nil { - if strings.Contains(err.Error(), "connection refused") { - return types.HijackedResponse{}, fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker daemon' running on this host?") - } return types.HijackedResponse{}, err } - // When we set up a TCP connection for hijack, there could be long periods - // of inactivity (a long running command with no output) that in certain - // network setups may cause ECONNTIMEOUT, leaving the client in an unknown - // state. 
Setting TCP KeepAlive on the socket connection will prohibit - // ECONNTIMEOUT unless the socket connection truly is broken - if tcpConn, ok := conn.(*net.TCPConn); ok { - tcpConn.SetKeepAlive(true) - tcpConn.SetKeepAlivePeriod(30 * time.Second) - } - - clientconn := httputil.NewClientConn(conn, nil) - defer clientconn.Close() - - // Server hijacks the connection, error 'connection closed' expected - _, err = clientconn.Do(req) - - rwc, br := clientconn.Hijack() - - return types.HijackedResponse{Conn: rwc, Reader: br}, err + return types.HijackedResponse{Conn: conn, Reader: bufio.NewReader(conn)}, err } func tlsDial(network, addr string, config *tls.Config) (net.Conn, error) { @@ -95,7 +70,7 @@ func tlsDialWithDialer(dialer *net.Dialer, network, addr string, config *tls.Con timeout := dialer.Timeout if !dialer.Deadline.IsZero() { - deadlineTimeout := dialer.Deadline.Sub(time.Now()) + deadlineTimeout := time.Until(dialer.Deadline) if timeout == 0 || deadlineTimeout < timeout { timeout = deadlineTimeout } @@ -139,7 +114,7 @@ func tlsDialWithDialer(dialer *net.Dialer, network, addr string, config *tls.Con // from the hostname we're connecting to. if config.ServerName == "" { // Make a copy to avoid polluting argument or default. - config = tlsconfig.Clone(config) + config = tlsConfigClone(config) config.ServerName = hostname } @@ -175,3 +150,83 @@ func dial(proto, addr string, tlsConfig *tls.Config) (net.Conn, error) { } return net.Dial(proto, addr) } + +func (cli *Client) setupHijackConn(req *http.Request, proto string) (net.Conn, error) { + req.Host = cli.addr + req.Header.Set("Connection", "Upgrade") + req.Header.Set("Upgrade", proto) + + conn, err := dial(cli.proto, cli.addr, resolveTLSConfig(cli.client.Transport)) + if err != nil { + return nil, errors.Wrap(err, "cannot connect to the Docker daemon. Is 'docker daemon' running on this host?") + } + + // When we set up a TCP connection for hijack, there could be long periods + // of inactivity (a long running command with no output) that in certain + // network setups may cause ECONNTIMEOUT, leaving the client in an unknown + // state. Setting TCP KeepAlive on the socket connection will prohibit + // ECONNTIMEOUT unless the socket connection truly is broken + if tcpConn, ok := conn.(*net.TCPConn); ok { + tcpConn.SetKeepAlive(true) + tcpConn.SetKeepAlivePeriod(30 * time.Second) + } + + clientconn := httputil.NewClientConn(conn, nil) + defer clientconn.Close() + + // Server hijacks the connection, error 'connection closed' expected + resp, err := clientconn.Do(req) + if err != httputil.ErrPersistEOF { + if err != nil { + return nil, err + } + if resp.StatusCode != http.StatusSwitchingProtocols { + resp.Body.Close() + return nil, fmt.Errorf("unable to upgrade to %s, received %d", proto, resp.StatusCode) + } + } + + c, br := clientconn.Hijack() + if br.Buffered() > 0 { + // If there is buffered content, wrap the connection. We return an + // object that implements CloseWrite iff the underlying connection + // implements it. + if _, ok := c.(types.CloseWriter); ok { + c = &hijackedConnCloseWriter{&hijackedConn{c, br}} + } else { + c = &hijackedConn{c, br} + } + } else { + br.Reset(nil) + } + + return c, nil +} + +// hijackedConn wraps a net.Conn and is returned by setupHijackConn in the case +// that a) there was already buffered data in the http layer when Hijack() was +// called, and b) the underlying net.Conn does *not* implement CloseWrite(). +// hijackedConn does not implement CloseWrite() either. 
+type hijackedConn struct { + net.Conn + r *bufio.Reader +} + +func (c *hijackedConn) Read(b []byte) (int, error) { + return c.r.Read(b) +} + +// hijackedConnCloseWriter is a hijackedConn which additionally implements +// CloseWrite(). It is returned by setupHijackConn in the case that a) there +// was already buffered data in the http layer when Hijack() was called, and b) +// the underlying net.Conn *does* implement CloseWrite(). +type hijackedConnCloseWriter struct { + *hijackedConn +} + +var _ types.CloseWriter = &hijackedConnCloseWriter{} + +func (c *hijackedConnCloseWriter) CloseWrite() error { + conn := c.Conn.(types.CloseWriter) + return conn.CloseWrite() +} diff --git a/vendor/github.com/docker/docker/client/image_build.go b/vendor/github.com/docker/docker/client/image_build.go index 6fde75d..6721460 100644 --- a/vendor/github.com/docker/docker/client/image_build.go +++ b/vendor/github.com/docker/docker/client/image_build.go @@ -1,14 +1,14 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/base64" "encoding/json" "io" "net/http" "net/url" "strconv" - - "golang.org/x/net/context" + "strings" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/container" @@ -29,7 +29,14 @@ func (cli *Client) ImageBuild(ctx context.Context, buildContext io.Reader, optio return types.ImageBuildResponse{}, err } headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf)) - headers.Set("Content-Type", "application/tar") + + if options.Platform != "" { + if err := cli.NewVersionError("1.32", "platform"); err != nil { + return types.ImageBuildResponse{}, err + } + query.Set("platform", options.Platform) + } + headers.Set("Content-Type", "application/x-tar") serverResp, err := cli.postRaw(ctx, "/build", query, buildContext, headers) if err != nil { @@ -48,6 +55,7 @@ func (cli *Client) imageBuildOptionsToQuery(options types.ImageBuildOptions) (ur query := url.Values{ "t": options.Tags, "securityopt": options.SecurityOpt, + "extrahosts": options.ExtraHosts, } if options.SuppressOutput { query.Set("q", "1") @@ -94,6 +102,7 @@ func (cli *Client) imageBuildOptionsToQuery(options types.ImageBuildOptions) (ur query.Set("cgroupparent", options.CgroupParent) query.Set("shmsize", strconv.FormatInt(options.ShmSize, 10)) query.Set("dockerfile", options.Dockerfile) + query.Set("target", options.Target) ulimitsJSON, err := json.Marshal(options.Ulimits) if err != nil { @@ -118,6 +127,11 @@ func (cli *Client) imageBuildOptionsToQuery(options types.ImageBuildOptions) (ur return query, err } query.Set("cachefrom", string(cacheFromJSON)) - + if options.SessionID != "" { + query.Set("session", options.SessionID) + } + if options.Platform != "" { + query.Set("platform", strings.ToLower(options.Platform)) + } return query, nil } diff --git a/vendor/github.com/docker/docker/client/image_create.go b/vendor/github.com/docker/docker/client/image_create.go index cf023a7..2393804 100644 --- a/vendor/github.com/docker/docker/client/image_create.go +++ b/vendor/github.com/docker/docker/client/image_create.go @@ -1,26 +1,29 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "io" "net/url" + "strings" - "golang.org/x/net/context" - + "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/reference" ) // ImageCreate creates a new image based in the parent options. // It returns the JSON content in the response body. 
func (cli *Client) ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) { - repository, tag, err := reference.Parse(parentReference) + ref, err := reference.ParseNormalizedNamed(parentReference) if err != nil { return nil, err } query := url.Values{} - query.Set("fromImage", repository) - query.Set("tag", tag) + query.Set("fromImage", reference.FamiliarName(ref)) + query.Set("tag", getAPITagFromNamedRef(ref)) + if options.Platform != "" { + query.Set("platform", strings.ToLower(options.Platform)) + } resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth) if err != nil { return nil, err diff --git a/vendor/github.com/docker/docker/client/image_history.go b/vendor/github.com/docker/docker/client/image_history.go index acb1ee9..0151b95 100644 --- a/vendor/github.com/docker/docker/client/image_history.go +++ b/vendor/github.com/docker/docker/client/image_history.go @@ -1,16 +1,16 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "net/url" - "github.com/docker/docker/api/types" - "golang.org/x/net/context" + "github.com/docker/docker/api/types/image" ) // ImageHistory returns the changes in an image in history format. -func (cli *Client) ImageHistory(ctx context.Context, imageID string) ([]types.ImageHistory, error) { - var history []types.ImageHistory +func (cli *Client) ImageHistory(ctx context.Context, imageID string) ([]image.HistoryResponseItem, error) { + var history []image.HistoryResponseItem serverResp, err := cli.get(ctx, "/images/"+imageID+"/history", url.Values{}, nil) if err != nil { return history, err diff --git a/vendor/github.com/docker/docker/client/image_import.go b/vendor/github.com/docker/docker/client/image_import.go index c6f154b..c2972ea 100644 --- a/vendor/github.com/docker/docker/client/image_import.go +++ b/vendor/github.com/docker/docker/client/image_import.go @@ -1,10 +1,10 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "io" "net/url" - - "golang.org/x/net/context" + "strings" "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" @@ -15,7 +15,7 @@ import ( func (cli *Client) ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) { if ref != "" { //Check if the given image name can be resolved - if _, err := reference.ParseNamed(ref); err != nil { + if _, err := reference.ParseNormalizedNamed(ref); err != nil { return nil, err } } @@ -25,6 +25,9 @@ func (cli *Client) ImageImport(ctx context.Context, source types.ImageImportSour query.Set("repo", ref) query.Set("tag", options.Tag) query.Set("message", options.Message) + if options.Platform != "" { + query.Set("platform", strings.ToLower(options.Platform)) + } for _, change := range options.Changes { query.Add("changes", change) } diff --git a/vendor/github.com/docker/docker/client/image_inspect.go b/vendor/github.com/docker/docker/client/image_inspect.go index b3a64ce..2f8f6d2 100644 --- a/vendor/github.com/docker/docker/client/image_inspect.go +++ b/vendor/github.com/docker/docker/client/image_inspect.go @@ -1,23 +1,22 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "encoding/json" "io/ioutil" - "net/http" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // ImageInspectWithRaw returns the image information and its raw representation. 
func (cli *Client) ImageInspectWithRaw(ctx context.Context, imageID string) (types.ImageInspect, []byte, error) { + if imageID == "" { + return types.ImageInspect{}, nil, objectNotFoundError{object: "image", id: imageID} + } serverResp, err := cli.get(ctx, "/images/"+imageID+"/json", nil, nil) if err != nil { - if serverResp.statusCode == http.StatusNotFound { - return types.ImageInspect{}, nil, imageNotFoundError{imageID} - } - return types.ImageInspect{}, nil, err + return types.ImageInspect{}, nil, wrapResponseError(err, serverResp, "image", imageID) } defer ensureReaderClosed(serverResp) diff --git a/vendor/github.com/docker/docker/client/image_list.go b/vendor/github.com/docker/docker/client/image_list.go index f26464f..32fae27 100644 --- a/vendor/github.com/docker/docker/client/image_list.go +++ b/vendor/github.com/docker/docker/client/image_list.go @@ -1,13 +1,13 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "net/url" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/versions" - "golang.org/x/net/context" ) // ImageList returns a list of images in the docker host. diff --git a/vendor/github.com/docker/docker/client/image_load.go b/vendor/github.com/docker/docker/client/image_load.go index 77aaf1a..91016e4 100644 --- a/vendor/github.com/docker/docker/client/image_load.go +++ b/vendor/github.com/docker/docker/client/image_load.go @@ -1,11 +1,10 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "io" "net/url" - "golang.org/x/net/context" - "github.com/docker/docker/api/types" ) diff --git a/vendor/github.com/docker/docker/client/image_prune.go b/vendor/github.com/docker/docker/client/image_prune.go index 5ef98b7..78ee3f6 100644 --- a/vendor/github.com/docker/docker/client/image_prune.go +++ b/vendor/github.com/docker/docker/client/image_prune.go @@ -1,12 +1,12 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "fmt" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" - "golang.org/x/net/context" ) // ImagesPrune requests the daemon to delete unused data diff --git a/vendor/github.com/docker/docker/client/image_pull.go b/vendor/github.com/docker/docker/client/image_pull.go index 3bffdb7..d97aacf 100644 --- a/vendor/github.com/docker/docker/client/image_pull.go +++ b/vendor/github.com/docker/docker/client/image_pull.go @@ -1,14 +1,14 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "io" "net/http" "net/url" + "strings" - "golang.org/x/net/context" - + "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/reference" ) // ImagePull requests the docker host to pull an image from a remote registry. 
@@ -19,16 +19,19 @@ import ( // FIXME(vdemeester): there is currently used in a few way in docker/docker // - if not in trusted content, ref is used to pass the whole reference, and tag is empty // - if in trusted content, ref is used to pass the reference name, and tag for the digest -func (cli *Client) ImagePull(ctx context.Context, ref string, options types.ImagePullOptions) (io.ReadCloser, error) { - repository, tag, err := reference.Parse(ref) +func (cli *Client) ImagePull(ctx context.Context, refStr string, options types.ImagePullOptions) (io.ReadCloser, error) { + ref, err := reference.ParseNormalizedNamed(refStr) if err != nil { return nil, err } query := url.Values{} - query.Set("fromImage", repository) - if tag != "" && !options.All { - query.Set("tag", tag) + query.Set("fromImage", reference.FamiliarName(ref)) + if !options.All { + query.Set("tag", getAPITagFromNamedRef(ref)) + } + if options.Platform != "" { + query.Set("platform", strings.ToLower(options.Platform)) } resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth) @@ -44,3 +47,18 @@ func (cli *Client) ImagePull(ctx context.Context, ref string, options types.Imag } return resp.body, nil } + +// getAPITagFromNamedRef returns a tag from the specified reference. +// This function is necessary as long as the docker "server" api expects +// digests to be sent as tags and makes a distinction between the name +// and tag/digest part of a reference. +func getAPITagFromNamedRef(ref reference.Named) string { + if digested, ok := ref.(reference.Digested); ok { + return digested.Digest().String() + } + ref = reference.TagNameOnly(ref) + if tagged, ok := ref.(reference.Tagged); ok { + return tagged.Tag() + } + return "" +} diff --git a/vendor/github.com/docker/docker/client/image_push.go b/vendor/github.com/docker/docker/client/image_push.go index 8e73d28..a15871c 100644 --- a/vendor/github.com/docker/docker/client/image_push.go +++ b/vendor/github.com/docker/docker/client/image_push.go @@ -1,14 +1,13 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "errors" "io" "net/http" "net/url" - "golang.org/x/net/context" - - distreference "github.com/docker/distribution/reference" + "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" ) @@ -16,31 +15,33 @@ import ( // It executes the privileged function if the operation is unauthorized // and it tries one more time. // It's up to the caller to handle the io.ReadCloser and close it properly. 
-func (cli *Client) ImagePush(ctx context.Context, ref string, options types.ImagePushOptions) (io.ReadCloser, error) { - distributionRef, err := distreference.ParseNamed(ref) +func (cli *Client) ImagePush(ctx context.Context, image string, options types.ImagePushOptions) (io.ReadCloser, error) { + ref, err := reference.ParseNormalizedNamed(image) if err != nil { return nil, err } - if _, isCanonical := distributionRef.(distreference.Canonical); isCanonical { + if _, isCanonical := ref.(reference.Canonical); isCanonical { return nil, errors.New("cannot push a digest reference") } - var tag = "" - if nameTaggedRef, isNamedTagged := distributionRef.(distreference.NamedTagged); isNamedTagged { + tag := "" + name := reference.FamiliarName(ref) + + if nameTaggedRef, isNamedTagged := ref.(reference.NamedTagged); isNamedTagged { tag = nameTaggedRef.Tag() } query := url.Values{} query.Set("tag", tag) - resp, err := cli.tryImagePush(ctx, distributionRef.Name(), query, options.RegistryAuth) + resp, err := cli.tryImagePush(ctx, name, query, options.RegistryAuth) if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil { newAuthHeader, privilegeErr := options.PrivilegeFunc() if privilegeErr != nil { return nil, privilegeErr } - resp, err = cli.tryImagePush(ctx, distributionRef.Name(), query, newAuthHeader) + resp, err = cli.tryImagePush(ctx, name, query, newAuthHeader) } if err != nil { return nil, err diff --git a/vendor/github.com/docker/docker/client/image_remove.go b/vendor/github.com/docker/docker/client/image_remove.go index 839e531..45d6e6f 100644 --- a/vendor/github.com/docker/docker/client/image_remove.go +++ b/vendor/github.com/docker/docker/client/image_remove.go @@ -1,15 +1,15 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "net/url" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // ImageRemove removes an image from the docker host. -func (cli *Client) ImageRemove(ctx context.Context, imageID string, options types.ImageRemoveOptions) ([]types.ImageDelete, error) { +func (cli *Client) ImageRemove(ctx context.Context, imageID string, options types.ImageRemoveOptions) ([]types.ImageDeleteResponseItem, error) { query := url.Values{} if options.Force { @@ -19,12 +19,12 @@ func (cli *Client) ImageRemove(ctx context.Context, imageID string, options type query.Set("noprune", "1") } + var dels []types.ImageDeleteResponseItem resp, err := cli.delete(ctx, "/images/"+imageID, query, nil) if err != nil { - return nil, err + return dels, wrapResponseError(err, resp, "image", imageID) } - var dels []types.ImageDelete err = json.NewDecoder(resp.body).Decode(&dels) ensureReaderClosed(resp) return dels, err diff --git a/vendor/github.com/docker/docker/client/image_save.go b/vendor/github.com/docker/docker/client/image_save.go index ecac880..d1314e4 100644 --- a/vendor/github.com/docker/docker/client/image_save.go +++ b/vendor/github.com/docker/docker/client/image_save.go @@ -1,10 +1,9 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "io" "net/url" - - "golang.org/x/net/context" ) // ImageSave retrieves one or more images from the docker host as an io.ReadCloser. 
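Editor's note — a minimal sketch (not part of the diff) of how the reworked reference handling above resolves the `fromImage`/`tag` query parameters. It mirrors the `getAPITagFromNamedRef` helper introduced in `image_pull.go`, using the vendored `github.com/docker/distribution/reference` package; the image names are illustrative only:

```go
package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

// apiTag mirrors getAPITagFromNamedRef: digests are sent in the "tag"
// field, and untagged names default to "latest" via TagNameOnly.
func apiTag(ref reference.Named) string {
	if digested, ok := ref.(reference.Digested); ok {
		return digested.Digest().String()
	}
	ref = reference.TagNameOnly(ref)
	if tagged, ok := ref.(reference.Tagged); ok {
		return tagged.Tag()
	}
	return ""
}

func main() {
	// Example references (illustrative); a digest reference would
	// return the full "sha256:..." string from apiTag instead.
	for _, s := range []string{"alpine", "alpine:3.7", "quay.io/coreos/etcd:v3.2"} {
		ref, err := reference.ParseNormalizedNamed(s)
		if err != nil {
			fmt.Printf("%s: %v\n", s, err)
			continue
		}
		fmt.Printf("%-25s -> fromImage=%s tag=%s\n",
			s, reference.FamiliarName(ref), apiTag(ref))
	}
}
```

So `"alpine"` normalizes to `fromImage=alpine tag=latest`, which is why `ImagePull` no longer needs the old split of repository and tag.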
diff --git a/vendor/github.com/docker/docker/client/image_search.go b/vendor/github.com/docker/docker/client/image_search.go index b0fcd5c..176de3c 100644 --- a/vendor/github.com/docker/docker/client/image_search.go +++ b/vendor/github.com/docker/docker/client/image_search.go @@ -1,6 +1,7 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "fmt" "net/http" @@ -9,7 +10,6 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/registry" - "golang.org/x/net/context" ) // ImageSearch makes the docker host to search by a term in a remote registry. @@ -21,7 +21,7 @@ func (cli *Client) ImageSearch(ctx context.Context, term string, options types.I query.Set("limit", fmt.Sprintf("%d", options.Limit)) if options.Filters.Len() > 0 { - filterJSON, err := filters.ToParam(options.Filters) + filterJSON, err := filters.ToJSON(options.Filters) if err != nil { return results, err } diff --git a/vendor/github.com/docker/docker/client/image_tag.go b/vendor/github.com/docker/docker/client/image_tag.go index bdbf94a..5652bfc 100644 --- a/vendor/github.com/docker/docker/client/image_tag.go +++ b/vendor/github.com/docker/docker/client/image_tag.go @@ -1,34 +1,37 @@ -package client +package client // import "github.com/docker/docker/client" import ( - "errors" - "fmt" + "context" "net/url" - "golang.org/x/net/context" - - distreference "github.com/docker/distribution/reference" - "github.com/docker/docker/api/types/reference" + "github.com/docker/distribution/reference" + "github.com/pkg/errors" ) // ImageTag tags an image in the docker host -func (cli *Client) ImageTag(ctx context.Context, imageID, ref string) error { - distributionRef, err := distreference.ParseNamed(ref) +func (cli *Client) ImageTag(ctx context.Context, source, target string) error { + if _, err := reference.ParseAnyReference(source); err != nil { + return errors.Wrapf(err, "Error parsing reference: %q is not a valid repository/tag", source) + } + + ref, err := reference.ParseNormalizedNamed(target) if err != nil { - return fmt.Errorf("Error parsing reference: %q is not a valid repository/tag", ref) + return errors.Wrapf(err, "Error parsing reference: %q is not a valid repository/tag", target) } - if _, isCanonical := distributionRef.(distreference.Canonical); isCanonical { + if _, isCanonical := ref.(reference.Canonical); isCanonical { return errors.New("refusing to create a tag with a digest reference") } - tag := reference.GetTagFromNamedRef(distributionRef) + ref = reference.TagNameOnly(ref) query := url.Values{} - query.Set("repo", distributionRef.Name()) - query.Set("tag", tag) + query.Set("repo", reference.FamiliarName(ref)) + if tagged, ok := ref.(reference.Tagged); ok { + query.Set("tag", tagged.Tag()) + } - resp, err := cli.post(ctx, "/images/"+imageID+"/tag", query, nil, nil) + resp, err := cli.post(ctx, "/images/"+source+"/tag", query, nil, nil) ensureReaderClosed(resp) return err } diff --git a/vendor/github.com/docker/docker/client/info.go b/vendor/github.com/docker/docker/client/info.go index ac07961..121f256 100644 --- a/vendor/github.com/docker/docker/client/info.go +++ b/vendor/github.com/docker/docker/client/info.go @@ -1,12 +1,12 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "fmt" "net/url" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // Info returns information about the docker server. 
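Editor's note — `ImageTag` now takes explicit `source` and `target` arguments and validates both references before issuing the request. A small usage sketch under the assumption of a default environment client (`NewEnvClient` reads `DOCKER_HOST` etc.); the image names are placeholders:

```go
package main

import (
	"context"
	"log"

	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Source may be any parseable reference; the target must not carry
	// a digest — the client refuses to create a tag from one.
	err = cli.ImageTag(context.Background(),
		"alpine:3.7", "registry.example.com/alpine:3.7")
	if err != nil {
		log.Fatal(err)
	}
}
```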
diff --git a/vendor/github.com/docker/docker/client/interface.go b/vendor/github.com/docker/docker/client/interface.go index 0597803..0487a0b 100644 --- a/vendor/github.com/docker/docker/client/interface.go +++ b/vendor/github.com/docker/docker/client/interface.go @@ -1,23 +1,28 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "io" + "net" + "net/http" "time" "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/container" + containertypes "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/events" "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/image" + networktypes "github.com/docker/docker/api/types/network" "github.com/docker/docker/api/types/registry" "github.com/docker/docker/api/types/swarm" volumetypes "github.com/docker/docker/api/types/volume" - "golang.org/x/net/context" ) // CommonAPIClient is the common methods between stable and experimental versions of APIClient. type CommonAPIClient interface { + ConfigAPIClient ContainerAPIClient + DistributionAPIClient ImageAPIClient NodeAPIClient NetworkAPIClient @@ -28,17 +33,22 @@ type CommonAPIClient interface { SystemAPIClient VolumeAPIClient ClientVersion() string + DaemonHost() string + HTTPClient() *http.Client ServerVersion(ctx context.Context) (types.Version, error) - UpdateClientVersion(v string) + NegotiateAPIVersion(ctx context.Context) + NegotiateAPIVersionPing(types.Ping) + DialSession(ctx context.Context, proto string, meta map[string][]string) (net.Conn, error) + Close() error } // ContainerAPIClient defines API client methods for the containers type ContainerAPIClient interface { ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.IDResponse, error) - ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, containerName string) (container.ContainerCreateCreatedBody, error) - ContainerDiff(ctx context.Context, container string) ([]types.ContainerChange, error) - ContainerExecAttach(ctx context.Context, execID string, config types.ExecConfig) (types.HijackedResponse, error) + ContainerCreate(ctx context.Context, config *containertypes.Config, hostConfig *containertypes.HostConfig, networkingConfig *networktypes.NetworkingConfig, containerName string) (containertypes.ContainerCreateCreatedBody, error) + ContainerDiff(ctx context.Context, container string) ([]containertypes.ContainerChangeResponseItem, error) + ContainerExecAttach(ctx context.Context, execID string, config types.ExecStartCheck) (types.HijackedResponse, error) ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error) ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error @@ -58,27 +68,33 @@ type ContainerAPIClient interface { ContainerStats(ctx context.Context, container string, stream bool) (types.ContainerStats, error) ContainerStart(ctx context.Context, container string, options types.ContainerStartOptions) error ContainerStop(ctx context.Context, container string, timeout *time.Duration) error - ContainerTop(ctx context.Context, container 
string, arguments []string) (types.ContainerProcessList, error) + ContainerTop(ctx context.Context, container string, arguments []string) (containertypes.ContainerTopOKBody, error) ContainerUnpause(ctx context.Context, container string) error - ContainerUpdate(ctx context.Context, container string, updateConfig container.UpdateConfig) (container.ContainerUpdateOKBody, error) - ContainerWait(ctx context.Context, container string) (int64, error) + ContainerUpdate(ctx context.Context, container string, updateConfig containertypes.UpdateConfig) (containertypes.ContainerUpdateOKBody, error) + ContainerWait(ctx context.Context, container string, condition containertypes.WaitCondition) (<-chan containertypes.ContainerWaitOKBody, <-chan error) CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) CopyToContainer(ctx context.Context, container, path string, content io.Reader, options types.CopyToContainerOptions) error ContainersPrune(ctx context.Context, pruneFilters filters.Args) (types.ContainersPruneReport, error) } +// DistributionAPIClient defines API client methods for the registry +type DistributionAPIClient interface { + DistributionInspect(ctx context.Context, image, encodedRegistryAuth string) (registry.DistributionInspect, error) +} + // ImageAPIClient defines API client methods for the images type ImageAPIClient interface { ImageBuild(ctx context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) + BuildCachePrune(ctx context.Context) (*types.BuildCachePruneReport, error) ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) - ImageHistory(ctx context.Context, image string) ([]types.ImageHistory, error) + ImageHistory(ctx context.Context, image string) ([]image.HistoryResponseItem, error) ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) ImageInspectWithRaw(ctx context.Context, image string) (types.ImageInspect, []byte, error) ImageList(ctx context.Context, options types.ImageListOptions) ([]types.ImageSummary, error) ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) ImagePull(ctx context.Context, ref string, options types.ImagePullOptions) (io.ReadCloser, error) ImagePush(ctx context.Context, ref string, options types.ImagePushOptions) (io.ReadCloser, error) - ImageRemove(ctx context.Context, image string, options types.ImageRemoveOptions) ([]types.ImageDelete, error) + ImageRemove(ctx context.Context, image string, options types.ImageRemoveOptions) ([]types.ImageDeleteResponseItem, error) ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error) ImageSave(ctx context.Context, images []string) (io.ReadCloser, error) ImageTag(ctx context.Context, image, ref string) error @@ -87,13 +103,13 @@ type ImageAPIClient interface { // NetworkAPIClient defines API client methods for the networks type NetworkAPIClient interface { - NetworkConnect(ctx context.Context, networkID, container string, config *network.EndpointSettings) error + NetworkConnect(ctx context.Context, network, container string, config *networktypes.EndpointSettings) error NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) - NetworkDisconnect(ctx context.Context, networkID, container string, force bool) error - 
NetworkInspect(ctx context.Context, networkID string) (types.NetworkResource, error) - NetworkInspectWithRaw(ctx context.Context, networkID string) (types.NetworkResource, []byte, error) + NetworkDisconnect(ctx context.Context, network, container string, force bool) error + NetworkInspect(ctx context.Context, network string, options types.NetworkInspectOptions) (types.NetworkResource, error) + NetworkInspectWithRaw(ctx context.Context, network string, options types.NetworkInspectOptions) (types.NetworkResource, []byte, error) NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) - NetworkRemove(ctx context.Context, networkID string) error + NetworkRemove(ctx context.Context, network string) error NetworksPrune(ctx context.Context, pruneFilter filters.Args) (types.NetworksPruneReport, error) } @@ -107,7 +123,7 @@ type NodeAPIClient interface { // PluginAPIClient defines API client methods for the plugins type PluginAPIClient interface { - PluginList(ctx context.Context) (types.PluginsListResponse, error) + PluginList(ctx context.Context, filter filters.Args) (types.PluginsListResponse, error) PluginRemove(ctx context.Context, name string, options types.PluginRemoveOptions) error PluginEnable(ctx context.Context, name string, options types.PluginEnableOptions) error PluginDisable(ctx context.Context, name string, options types.PluginDisableOptions) error @@ -122,11 +138,12 @@ type PluginAPIClient interface { // ServiceAPIClient defines API client methods for the services type ServiceAPIClient interface { ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (types.ServiceCreateResponse, error) - ServiceInspectWithRaw(ctx context.Context, serviceID string) (swarm.Service, []byte, error) + ServiceInspectWithRaw(ctx context.Context, serviceID string, options types.ServiceInspectOptions) (swarm.Service, []byte, error) ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) ServiceRemove(ctx context.Context, serviceID string) error ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error) ServiceLogs(ctx context.Context, serviceID string, options types.ContainerLogsOptions) (io.ReadCloser, error) + TaskLogs(ctx context.Context, taskID string, options types.ContainerLogsOptions) (io.ReadCloser, error) TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) } @@ -153,10 +170,10 @@ type SystemAPIClient interface { // VolumeAPIClient defines API client methods for the volumes type VolumeAPIClient interface { - VolumeCreate(ctx context.Context, options volumetypes.VolumesCreateBody) (types.Volume, error) + VolumeCreate(ctx context.Context, options volumetypes.VolumeCreateBody) (types.Volume, error) VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error) VolumeInspectWithRaw(ctx context.Context, volumeID string) (types.Volume, []byte, error) - VolumeList(ctx context.Context, filter filters.Args) (volumetypes.VolumesListOKBody, error) + VolumeList(ctx context.Context, filter filters.Args) (volumetypes.VolumeListOKBody, error) VolumeRemove(ctx context.Context, volumeID string, force bool) error VolumesPrune(ctx context.Context, pruneFilter filters.Args) (types.VolumesPruneReport, error) } @@ -169,3 +186,12 @@ type SecretAPIClient interface 
{ SecretInspectWithRaw(ctx context.Context, name string) (swarm.Secret, []byte, error) SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error } + +// ConfigAPIClient defines API client methods for configs +type ConfigAPIClient interface { + ConfigList(ctx context.Context, options types.ConfigListOptions) ([]swarm.Config, error) + ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (types.ConfigCreateResponse, error) + ConfigRemove(ctx context.Context, id string) error + ConfigInspectWithRaw(ctx context.Context, name string) (swarm.Config, []byte, error) + ConfigUpdate(ctx context.Context, id string, version swarm.Version, config swarm.ConfigSpec) error +} diff --git a/vendor/github.com/docker/docker/client/interface_experimental.go b/vendor/github.com/docker/docker/client/interface_experimental.go index 51da98e..402ffb5 100644 --- a/vendor/github.com/docker/docker/client/interface_experimental.go +++ b/vendor/github.com/docker/docker/client/interface_experimental.go @@ -1,8 +1,9 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" + "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) type apiClientExperimental interface { diff --git a/vendor/github.com/docker/docker/client/interface_stable.go b/vendor/github.com/docker/docker/client/interface_stable.go index cc90a3c..5502cd7 100644 --- a/vendor/github.com/docker/docker/client/interface_stable.go +++ b/vendor/github.com/docker/docker/client/interface_stable.go @@ -1,4 +1,4 @@ -package client +package client // import "github.com/docker/docker/client" // APIClient is an interface that clients that talk with a docker server must implement. type APIClient interface { diff --git a/vendor/github.com/docker/docker/client/login.go b/vendor/github.com/docker/docker/client/login.go index 600dc71..7d66181 100644 --- a/vendor/github.com/docker/docker/client/login.go +++ b/vendor/github.com/docker/docker/client/login.go @@ -1,17 +1,17 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "net/http" "net/url" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/registry" - "golang.org/x/net/context" ) // RegistryLogin authenticates the docker server with a given docker registry. -// It returns UnauthorizerError when the authentication fails. +// It returns unauthorizedError when the authentication fails. func (cli *Client) RegistryLogin(ctx context.Context, auth types.AuthConfig) (registry.AuthenticateOKBody, error) { resp, err := cli.post(ctx, "/auth", url.Values{}, auth, nil) diff --git a/vendor/github.com/docker/docker/client/network_connect.go b/vendor/github.com/docker/docker/client/network_connect.go index c022c17..5718946 100644 --- a/vendor/github.com/docker/docker/client/network_connect.go +++ b/vendor/github.com/docker/docker/client/network_connect.go @@ -1,9 +1,10 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" + "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/network" - "golang.org/x/net/context" ) // NetworkConnect connects a container to an existent network in the docker host. 
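Editor's note — the `interface.go` hunk above changes `ContainerWait` from a blocking call returning `(int64, error)` to a pair of channels plus a wait condition. A minimal consumer sketch, assuming an environment client; `"my-container"` is a placeholder ID:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Both channels must be consumed: errCh delivers transport
	// failures, statusCh delivers the container's exit status.
	statusCh, errCh := cli.ContainerWait(context.Background(),
		"my-container", container.WaitConditionNotRunning)
	select {
	case err := <-errCh:
		log.Fatal(err)
	case status := <-statusCh:
		fmt.Println("exit code:", status.StatusCode)
	}
}
```

The channel form lets callers select against their own context cancellation instead of blocking indefinitely inside the client.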
diff --git a/vendor/github.com/docker/docker/client/network_create.go b/vendor/github.com/docker/docker/client/network_create.go index 4067a54..41da2ac 100644 --- a/vendor/github.com/docker/docker/client/network_create.go +++ b/vendor/github.com/docker/docker/client/network_create.go @@ -1,10 +1,10 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // NetworkCreate creates a new network in the docker host. diff --git a/vendor/github.com/docker/docker/client/network_disconnect.go b/vendor/github.com/docker/docker/client/network_disconnect.go index 24b58e3..dd15676 100644 --- a/vendor/github.com/docker/docker/client/network_disconnect.go +++ b/vendor/github.com/docker/docker/client/network_disconnect.go @@ -1,8 +1,9 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" + "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // NetworkDisconnect disconnects a container from an existent network in the docker host. diff --git a/vendor/github.com/docker/docker/client/network_inspect.go b/vendor/github.com/docker/docker/client/network_inspect.go index 5ad4ea5..025f6d8 100644 --- a/vendor/github.com/docker/docker/client/network_inspect.go +++ b/vendor/github.com/docker/docker/client/network_inspect.go @@ -1,30 +1,41 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "encoding/json" "io/ioutil" - "net/http" + "net/url" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // NetworkInspect returns the information for a specific network configured in the docker host. -func (cli *Client) NetworkInspect(ctx context.Context, networkID string) (types.NetworkResource, error) { - networkResource, _, err := cli.NetworkInspectWithRaw(ctx, networkID) +func (cli *Client) NetworkInspect(ctx context.Context, networkID string, options types.NetworkInspectOptions) (types.NetworkResource, error) { + networkResource, _, err := cli.NetworkInspectWithRaw(ctx, networkID, options) return networkResource, err } // NetworkInspectWithRaw returns the information for a specific network configured in the docker host and its raw representation. 
-func (cli *Client) NetworkInspectWithRaw(ctx context.Context, networkID string) (types.NetworkResource, []byte, error) { - var networkResource types.NetworkResource - resp, err := cli.get(ctx, "/networks/"+networkID, nil, nil) +func (cli *Client) NetworkInspectWithRaw(ctx context.Context, networkID string, options types.NetworkInspectOptions) (types.NetworkResource, []byte, error) { + if networkID == "" { + return types.NetworkResource{}, nil, objectNotFoundError{object: "network", id: networkID} + } + var ( + networkResource types.NetworkResource + resp serverResponse + err error + ) + query := url.Values{} + if options.Verbose { + query.Set("verbose", "true") + } + if options.Scope != "" { + query.Set("scope", options.Scope) + } + resp, err = cli.get(ctx, "/networks/"+networkID, query, nil) if err != nil { - if resp.statusCode == http.StatusNotFound { - return networkResource, nil, networkNotFoundError{networkID} - } - return networkResource, nil, err + return networkResource, nil, wrapResponseError(err, resp, "network", networkID) } defer ensureReaderClosed(resp) diff --git a/vendor/github.com/docker/docker/client/network_list.go b/vendor/github.com/docker/docker/client/network_list.go index e566a93..f16b2f5 100644 --- a/vendor/github.com/docker/docker/client/network_list.go +++ b/vendor/github.com/docker/docker/client/network_list.go @@ -1,12 +1,12 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "net/url" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" - "golang.org/x/net/context" ) // NetworkList returns the list of networks configured in the docker host. diff --git a/vendor/github.com/docker/docker/client/network_prune.go b/vendor/github.com/docker/docker/client/network_prune.go index 7352a7f..6418b8b 100644 --- a/vendor/github.com/docker/docker/client/network_prune.go +++ b/vendor/github.com/docker/docker/client/network_prune.go @@ -1,12 +1,12 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "fmt" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" - "golang.org/x/net/context" ) // NetworksPrune requests the daemon to delete unused networks diff --git a/vendor/github.com/docker/docker/client/network_remove.go b/vendor/github.com/docker/docker/client/network_remove.go index 6bd6748..1274143 100644 --- a/vendor/github.com/docker/docker/client/network_remove.go +++ b/vendor/github.com/docker/docker/client/network_remove.go @@ -1,10 +1,10 @@ -package client +package client // import "github.com/docker/docker/client" -import "golang.org/x/net/context" +import "context" // NetworkRemove removes an existent network from the docker host. 
func (cli *Client) NetworkRemove(ctx context.Context, networkID string) error { resp, err := cli.delete(ctx, "/networks/"+networkID, nil, nil) ensureReaderClosed(resp) - return err + return wrapResponseError(err, resp, "network", networkID) } diff --git a/vendor/github.com/docker/docker/client/node_inspect.go b/vendor/github.com/docker/docker/client/node_inspect.go index abf505d..593b2e9 100644 --- a/vendor/github.com/docker/docker/client/node_inspect.go +++ b/vendor/github.com/docker/docker/client/node_inspect.go @@ -1,23 +1,22 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "encoding/json" "io/ioutil" - "net/http" "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" ) // NodeInspectWithRaw returns the node information. func (cli *Client) NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error) { + if nodeID == "" { + return swarm.Node{}, nil, objectNotFoundError{object: "node", id: nodeID} + } serverResp, err := cli.get(ctx, "/nodes/"+nodeID, nil, nil) if err != nil { - if serverResp.statusCode == http.StatusNotFound { - return swarm.Node{}, nil, nodeNotFoundError{nodeID} - } - return swarm.Node{}, nil, err + return swarm.Node{}, nil, wrapResponseError(err, serverResp, "node", nodeID) } defer ensureReaderClosed(serverResp) diff --git a/vendor/github.com/docker/docker/client/node_list.go b/vendor/github.com/docker/docker/client/node_list.go index 3e8440f..9883f6f 100644 --- a/vendor/github.com/docker/docker/client/node_list.go +++ b/vendor/github.com/docker/docker/client/node_list.go @@ -1,13 +1,13 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "net/url" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" ) // NodeList returns the list of nodes. @@ -15,7 +15,7 @@ func (cli *Client) NodeList(ctx context.Context, options types.NodeListOptions) query := url.Values{} if options.Filters.Len() > 0 { - filterJSON, err := filters.ToParam(options.Filters) + filterJSON, err := filters.ToJSON(options.Filters) if err != nil { return nil, err diff --git a/vendor/github.com/docker/docker/client/node_remove.go b/vendor/github.com/docker/docker/client/node_remove.go index 0a77f3d..c07fc0e 100644 --- a/vendor/github.com/docker/docker/client/node_remove.go +++ b/vendor/github.com/docker/docker/client/node_remove.go @@ -1,11 +1,11 @@ -package client +package client // import "github.com/docker/docker/client" import ( "net/url" - "github.com/docker/docker/api/types" + "context" - "golang.org/x/net/context" + "github.com/docker/docker/api/types" ) // NodeRemove removes a Node. @@ -17,5 +17,5 @@ func (cli *Client) NodeRemove(ctx context.Context, nodeID string, options types. resp, err := cli.delete(ctx, "/nodes/"+nodeID, query, nil) ensureReaderClosed(resp) - return err + return wrapResponseError(err, resp, "node", nodeID) } diff --git a/vendor/github.com/docker/docker/client/node_update.go b/vendor/github.com/docker/docker/client/node_update.go index 3ca9760..de32a61 100644 --- a/vendor/github.com/docker/docker/client/node_update.go +++ b/vendor/github.com/docker/docker/client/node_update.go @@ -1,11 +1,11 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "net/url" "strconv" "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" ) // NodeUpdate updates a Node. 
diff --git a/vendor/github.com/docker/docker/client/ping.go b/vendor/github.com/docker/docker/client/ping.go index 22dcda2..85d38ad 100644 --- a/vendor/github.com/docker/docker/client/ping.go +++ b/vendor/github.com/docker/docker/client/ping.go @@ -1,16 +1,16 @@ -package client +package client // import "github.com/docker/docker/client" import ( - "fmt" + "context" + "path" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) -// Ping pings the server and return the value of the "Docker-Experimental" & "API-Version" headers +// Ping pings the server and returns the value of the "Docker-Experimental", "OS-Type" & "API-Version" headers func (cli *Client) Ping(ctx context.Context) (types.Ping, error) { var ping types.Ping - req, err := cli.buildRequest("GET", fmt.Sprintf("%s/_ping", cli.basePath), nil, nil) + req, err := cli.buildRequest("GET", path.Join(cli.basePath, "/_ping"), nil, nil) if err != nil { return ping, err } @@ -20,11 +20,13 @@ func (cli *Client) Ping(ctx context.Context) (types.Ping, error) { } defer ensureReaderClosed(serverResp) - ping.APIVersion = serverResp.header.Get("API-Version") + if serverResp.header != nil { + ping.APIVersion = serverResp.header.Get("API-Version") - if serverResp.header.Get("Docker-Experimental") == "true" { - ping.Experimental = true + if serverResp.header.Get("Docker-Experimental") == "true" { + ping.Experimental = true + } + ping.OSType = serverResp.header.Get("OSType") } - - return ping, nil + return ping, cli.checkResponseErr(serverResp) } diff --git a/vendor/github.com/docker/docker/client/plugin_create.go b/vendor/github.com/docker/docker/client/plugin_create.go index a660ba5..4591db5 100644 --- a/vendor/github.com/docker/docker/client/plugin_create.go +++ b/vendor/github.com/docker/docker/client/plugin_create.go @@ -1,18 +1,18 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "io" "net/http" "net/url" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // PluginCreate creates a plugin func (cli *Client) PluginCreate(ctx context.Context, createContext io.Reader, createOptions types.PluginCreateOptions) error { headers := http.Header(make(map[string][]string)) - headers.Set("Content-Type", "application/tar") + headers.Set("Content-Type", "application/x-tar") query := url.Values{} query.Set("name", createOptions.RepoName) diff --git a/vendor/github.com/docker/docker/client/plugin_disable.go b/vendor/github.com/docker/docker/client/plugin_disable.go index 30467db..01f6574 100644 --- a/vendor/github.com/docker/docker/client/plugin_disable.go +++ b/vendor/github.com/docker/docker/client/plugin_disable.go @@ -1,10 +1,10 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "net/url" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // PluginDisable disables a plugin diff --git a/vendor/github.com/docker/docker/client/plugin_enable.go b/vendor/github.com/docker/docker/client/plugin_enable.go index 95517c4..736da48 100644 --- a/vendor/github.com/docker/docker/client/plugin_enable.go +++ b/vendor/github.com/docker/docker/client/plugin_enable.go @@ -1,11 +1,11 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "net/url" "strconv" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // PluginEnable enables a plugin diff --git a/vendor/github.com/docker/docker/client/plugin_inspect.go b/vendor/github.com/docker/docker/client/plugin_inspect.go index 
89f39ee..0ab7bea 100644 --- a/vendor/github.com/docker/docker/client/plugin_inspect.go +++ b/vendor/github.com/docker/docker/client/plugin_inspect.go @@ -1,23 +1,22 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "encoding/json" "io/ioutil" - "net/http" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // PluginInspectWithRaw inspects an existing plugin func (cli *Client) PluginInspectWithRaw(ctx context.Context, name string) (*types.Plugin, []byte, error) { + if name == "" { + return nil, nil, objectNotFoundError{object: "plugin", id: name} + } resp, err := cli.get(ctx, "/plugins/"+name+"/json", nil, nil) if err != nil { - if resp.statusCode == http.StatusNotFound { - return nil, nil, pluginNotFoundError{name} - } - return nil, nil, err + return nil, nil, wrapResponseError(err, resp, "plugin", name) } defer ensureReaderClosed(resp) diff --git a/vendor/github.com/docker/docker/client/plugin_install.go b/vendor/github.com/docker/docker/client/plugin_install.go index 3217c4c..13baa40 100644 --- a/vendor/github.com/docker/docker/client/plugin_install.go +++ b/vendor/github.com/docker/docker/client/plugin_install.go @@ -1,6 +1,7 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "io" "net/http" @@ -9,13 +10,12 @@ import ( "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" "github.com/pkg/errors" - "golang.org/x/net/context" ) // PluginInstall installs a plugin func (cli *Client) PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) (rc io.ReadCloser, err error) { query := url.Values{} - if _, err := reference.ParseNamed(options.RemoteRef); err != nil { + if _, err := reference.ParseNormalizedNamed(options.RemoteRef); err != nil { return nil, errors.Wrap(err, "invalid remote reference") } query.Set("remote", options.RemoteRef) @@ -60,8 +60,8 @@ func (cli *Client) PluginInstall(ctx context.Context, name string, options types return } - err = cli.PluginEnable(ctx, name, types.PluginEnableOptions{Timeout: 0}) - pw.CloseWithError(err) + enableErr := cli.PluginEnable(ctx, name, types.PluginEnableOptions{Timeout: 0}) + pw.CloseWithError(enableErr) }() return pr, nil } diff --git a/vendor/github.com/docker/docker/client/plugin_list.go b/vendor/github.com/docker/docker/client/plugin_list.go index 88c480a..ade1051 100644 --- a/vendor/github.com/docker/docker/client/plugin_list.go +++ b/vendor/github.com/docker/docker/client/plugin_list.go @@ -1,18 +1,29 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" + "net/url" "github.com/docker/docker/api/types" - "golang.org/x/net/context" + "github.com/docker/docker/api/types/filters" ) // PluginList returns the installed plugins -func (cli *Client) PluginList(ctx context.Context) (types.PluginsListResponse, error) { +func (cli *Client) PluginList(ctx context.Context, filter filters.Args) (types.PluginsListResponse, error) { var plugins types.PluginsListResponse - resp, err := cli.get(ctx, "/plugins", nil, nil) + query := url.Values{} + + if filter.Len() > 0 { + filterJSON, err := filters.ToParamWithVersion(cli.version, filter) + if err != nil { + return plugins, err + } + query.Set("filters", filterJSON) + } + resp, err := cli.get(ctx, "/plugins", query, nil) if err != nil { - return plugins, err + return plugins, wrapResponseError(err, resp, "plugin", "") } err = 
json.NewDecoder(resp.body).Decode(&plugins) diff --git a/vendor/github.com/docker/docker/client/plugin_push.go b/vendor/github.com/docker/docker/client/plugin_push.go index 1e5f963..d20bfe8 100644 --- a/vendor/github.com/docker/docker/client/plugin_push.go +++ b/vendor/github.com/docker/docker/client/plugin_push.go @@ -1,9 +1,8 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "io" - - "golang.org/x/net/context" ) // PluginPush pushes a plugin to a registry diff --git a/vendor/github.com/docker/docker/client/plugin_remove.go b/vendor/github.com/docker/docker/client/plugin_remove.go index b017e4d..8563bab 100644 --- a/vendor/github.com/docker/docker/client/plugin_remove.go +++ b/vendor/github.com/docker/docker/client/plugin_remove.go @@ -1,10 +1,10 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "net/url" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // PluginRemove removes a plugin @@ -16,5 +16,5 @@ func (cli *Client) PluginRemove(ctx context.Context, name string, options types. resp, err := cli.delete(ctx, "/plugins/"+name, query, nil) ensureReaderClosed(resp) - return err + return wrapResponseError(err, resp, "plugin", name) } diff --git a/vendor/github.com/docker/docker/client/plugin_set.go b/vendor/github.com/docker/docker/client/plugin_set.go index 3260d2a..dcf5752 100644 --- a/vendor/github.com/docker/docker/client/plugin_set.go +++ b/vendor/github.com/docker/docker/client/plugin_set.go @@ -1,7 +1,7 @@ -package client +package client // import "github.com/docker/docker/client" import ( - "golang.org/x/net/context" + "context" ) // PluginSet modifies settings for an existing plugin diff --git a/vendor/github.com/docker/docker/client/plugin_upgrade.go b/vendor/github.com/docker/docker/client/plugin_upgrade.go index 95a4356..115cea9 100644 --- a/vendor/github.com/docker/docker/client/plugin_upgrade.go +++ b/vendor/github.com/docker/docker/client/plugin_upgrade.go @@ -1,20 +1,22 @@ -package client +package client // import "github.com/docker/docker/client" import ( - "fmt" + "context" "io" "net/url" "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" "github.com/pkg/errors" - "golang.org/x/net/context" ) // PluginUpgrade upgrades a plugin func (cli *Client) PluginUpgrade(ctx context.Context, name string, options types.PluginInstallOptions) (rc io.ReadCloser, err error) { + if err := cli.NewVersionError("1.26", "plugin upgrade"); err != nil { + return nil, err + } query := url.Values{} - if _, err := reference.ParseNamed(options.RemoteRef); err != nil { + if _, err := reference.ParseNormalizedNamed(options.RemoteRef); err != nil { return nil, errors.Wrap(err, "invalid remote reference") } query.Set("remote", options.RemoteRef) @@ -33,5 +35,5 @@ func (cli *Client) PluginUpgrade(ctx context.Context, name string, options types func (cli *Client) tryPluginUpgrade(ctx context.Context, query url.Values, privileges types.PluginPrivileges, name, registryAuth string) (serverResponse, error) { headers := map[string][]string{"X-Registry-Auth": {registryAuth}} - return cli.post(ctx, fmt.Sprintf("/plugins/%s/upgrade", name), query, privileges, headers) + return cli.post(ctx, "/plugins/"+name+"/upgrade", query, privileges, headers) } diff --git a/vendor/github.com/docker/docker/client/request.go b/vendor/github.com/docker/docker/client/request.go index ac05363..a19d62a 100644 --- a/vendor/github.com/docker/docker/client/request.go +++ 
b/vendor/github.com/docker/docker/client/request.go @@ -1,7 +1,8 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "encoding/json" "fmt" "io" @@ -15,7 +16,6 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/versions" "github.com/pkg/errors" - "golang.org/x/net/context" "golang.org/x/net/context/ctxhttp" ) @@ -24,6 +24,7 @@ type serverResponse struct { body io.ReadCloser header http.Header statusCode int + reqURL *url.URL } // head sends an http request to the docker API using the method HEAD. @@ -31,12 +32,12 @@ func (cli *Client) head(ctx context.Context, path string, query url.Values, head return cli.sendRequest(ctx, "HEAD", path, query, nil, headers) } -// getWithContext sends an http request to the docker API using the method GET with a specific go context. +// get sends an http request to the docker API using the method GET with a specific Go context. func (cli *Client) get(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) { return cli.sendRequest(ctx, "GET", path, query, nil, headers) } -// postWithContext sends an http request to the docker API using the method POST with a specific go context. +// post sends an http request to the docker API using the method POST with a specific Go context. func (cli *Client) post(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (serverResponse, error) { body, headers, err := encodeBody(obj, headers) if err != nil { @@ -58,7 +59,7 @@ func (cli *Client) put(ctx context.Context, path string, query url.Values, obj i return cli.sendRequest(ctx, "PUT", path, query, body, headers) } -// put sends an http request to the docker API using the method PUT. +// putRaw sends an http request to the docker API using the method PUT. func (cli *Client) putRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (serverResponse, error) { return cli.sendRequest(ctx, "PUT", path, query, body, headers) } @@ -118,11 +119,15 @@ func (cli *Client) sendRequest(ctx context.Context, method, path string, query u if err != nil { return serverResponse{}, err } - return cli.doRequest(ctx, req) + resp, err := cli.doRequest(ctx, req) + if err != nil { + return resp, err + } + return resp, cli.checkResponseErr(resp) } func (cli *Client) doRequest(ctx context.Context, req *http.Request) (serverResponse, error) { - serverResp := serverResponse{statusCode: -1} + serverResp := serverResponse{statusCode: -1, reqURL: req.URL} resp, err := ctxhttp.Do(ctx, cli.client, req) if err != nil { @@ -165,7 +170,7 @@ func (cli *Client) doRequest(ctx context.Context, req *http.Request) (serverResp // daemon on Windows where the daemon is listening on a named pipe // `//./pipe/docker_engine, and the client must be running elevated. // Give users a clue rather than the not-overly useful message - // such as `error during connect: Get http://%2F%2F.%2Fpipe%2Fdocker_engine/v1.25/info: + // such as `error during connect: Get http://%2F%2F.%2Fpipe%2Fdocker_engine/v1.26/info: // open //./pipe/docker_engine: The system cannot find the file specified.`. 
// Note we can't string compare "The system cannot find the file specified" as // this is localised - for example in French the error would be @@ -179,35 +184,42 @@ func (cli *Client) doRequest(ctx context.Context, req *http.Request) (serverResp if resp != nil { serverResp.statusCode = resp.StatusCode + serverResp.body = resp.Body + serverResp.header = resp.Header } + return serverResp, nil +} - if serverResp.statusCode < 200 || serverResp.statusCode >= 400 { - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return serverResp, err - } - if len(body) == 0 { - return serverResp, fmt.Errorf("Error: request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), req.URL) - } +func (cli *Client) checkResponseErr(serverResp serverResponse) error { + if serverResp.statusCode >= 200 && serverResp.statusCode < 400 { + return nil + } - var errorMessage string - if (cli.version == "" || versions.GreaterThan(cli.version, "1.23")) && - resp.Header.Get("Content-Type") == "application/json" { - var errorResponse types.ErrorResponse - if err := json.Unmarshal(body, &errorResponse); err != nil { - return serverResp, fmt.Errorf("Error reading JSON: %v", err) - } - errorMessage = errorResponse.Message - } else { - errorMessage = string(body) - } + body, err := ioutil.ReadAll(serverResp.body) + if err != nil { + return err + } + if len(body) == 0 { + return fmt.Errorf("request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), serverResp.reqURL) + } - return serverResp, fmt.Errorf("Error response from daemon: %s", strings.TrimSpace(errorMessage)) + var ct string + if serverResp.header != nil { + ct = serverResp.header.Get("Content-Type") } - serverResp.body = resp.Body - serverResp.header = resp.Header - return serverResp, nil + var errorMessage string + if (cli.version == "" || versions.GreaterThan(cli.version, "1.23")) && ct == "application/json" { + var errorResponse types.ErrorResponse + if err := json.Unmarshal(body, &errorResponse); err != nil { + return fmt.Errorf("Error reading JSON: %v", err) + } + errorMessage = errorResponse.Message + } else { + errorMessage = string(body) + } + + return fmt.Errorf("Error response from daemon: %s", strings.TrimSpace(errorMessage)) } func (cli *Client) addHeaders(req *http.Request, headers headers) *http.Request { @@ -239,9 +251,9 @@ func encodeData(data interface{}) (*bytes.Buffer, error) { } func ensureReaderClosed(response serverResponse) { - if body := response.body; body != nil { + if response.body != nil { // Drain up to 512 bytes and close the body to let the Transport reuse the connection - io.CopyN(ioutil.Discard, body, 512) + io.CopyN(ioutil.Discard, response.body, 512) response.body.Close() } } diff --git a/vendor/github.com/docker/docker/client/secret_create.go b/vendor/github.com/docker/docker/client/secret_create.go index de8b041..09fae82 100644 --- a/vendor/github.com/docker/docker/client/secret_create.go +++ b/vendor/github.com/docker/docker/client/secret_create.go @@ -1,19 +1,20 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" ) // SecretCreate creates a new Secret. 
func (cli *Client) SecretCreate(ctx context.Context, secret swarm.SecretSpec) (types.SecretCreateResponse, error) { - var headers map[string][]string - var response types.SecretCreateResponse - resp, err := cli.post(ctx, "/secrets/create", nil, secret, headers) + if err := cli.NewVersionError("1.25", "secret create"); err != nil { + return response, err + } + resp, err := cli.post(ctx, "/secrets/create", nil, secret, nil) if err != nil { return response, err } diff --git a/vendor/github.com/docker/docker/client/secret_inspect.go b/vendor/github.com/docker/docker/client/secret_inspect.go index f774576..e8322f4 100644 --- a/vendor/github.com/docker/docker/client/secret_inspect.go +++ b/vendor/github.com/docker/docker/client/secret_inspect.go @@ -1,23 +1,25 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "encoding/json" "io/ioutil" - "net/http" "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" ) // SecretInspectWithRaw returns the secret information with raw data func (cli *Client) SecretInspectWithRaw(ctx context.Context, id string) (swarm.Secret, []byte, error) { + if err := cli.NewVersionError("1.25", "secret inspect"); err != nil { + return swarm.Secret{}, nil, err + } + if id == "" { + return swarm.Secret{}, nil, objectNotFoundError{object: "secret", id: id} + } resp, err := cli.get(ctx, "/secrets/"+id, nil, nil) if err != nil { - if resp.statusCode == http.StatusNotFound { - return swarm.Secret{}, nil, secretNotFoundError{id} - } - return swarm.Secret{}, nil, err + return swarm.Secret{}, nil, wrapResponseError(err, resp, "secret", id) } defer ensureReaderClosed(resp) diff --git a/vendor/github.com/docker/docker/client/secret_list.go b/vendor/github.com/docker/docker/client/secret_list.go index 7e9d5ec..f6bf7ba 100644 --- a/vendor/github.com/docker/docker/client/secret_list.go +++ b/vendor/github.com/docker/docker/client/secret_list.go @@ -1,21 +1,24 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "net/url" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" ) // SecretList returns the list of secrets. func (cli *Client) SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error) { + if err := cli.NewVersionError("1.25", "secret list"); err != nil { + return nil, err + } query := url.Values{} if options.Filters.Len() > 0 { - filterJSON, err := filters.ToParam(options.Filters) + filterJSON, err := filters.ToJSON(options.Filters) if err != nil { return nil, err } diff --git a/vendor/github.com/docker/docker/client/secret_remove.go b/vendor/github.com/docker/docker/client/secret_remove.go index 1955b98..e9d5218 100644 --- a/vendor/github.com/docker/docker/client/secret_remove.go +++ b/vendor/github.com/docker/docker/client/secret_remove.go @@ -1,10 +1,13 @@ -package client +package client // import "github.com/docker/docker/client" -import "golang.org/x/net/context" +import "context" // SecretRemove removes a Secret. 
func (cli *Client) SecretRemove(ctx context.Context, id string) error { + if err := cli.NewVersionError("1.25", "secret remove"); err != nil { + return err + } resp, err := cli.delete(ctx, "/secrets/"+id, nil, nil) ensureReaderClosed(resp) - return err + return wrapResponseError(err, resp, "secret", id) } diff --git a/vendor/github.com/docker/docker/client/secret_update.go b/vendor/github.com/docker/docker/client/secret_update.go index b94e24a..164256b 100644 --- a/vendor/github.com/docker/docker/client/secret_update.go +++ b/vendor/github.com/docker/docker/client/secret_update.go @@ -1,16 +1,18 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "net/url" "strconv" "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" ) -// SecretUpdate updates a Secret. Currently, the only part of a secret spec -// which can be updated is Labels. +// SecretUpdate attempts to update a Secret func (cli *Client) SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error { + if err := cli.NewVersionError("1.25", "secret update"); err != nil { + return err + } query := url.Values{} query.Set("version", strconv.FormatUint(version.Index, 10)) resp, err := cli.post(ctx, "/secrets/"+id+"/update", query, secret, nil) diff --git a/vendor/github.com/docker/docker/client/service_create.go b/vendor/github.com/docker/docker/client/service_create.go index 3d1be22..8d08271 100644 --- a/vendor/github.com/docker/docker/client/service_create.go +++ b/vendor/github.com/docker/docker/client/service_create.go @@ -1,23 +1,75 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" + "fmt" + "strings" + "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" ) // ServiceCreate creates a new Service. 
func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (types.ServiceCreateResponse, error) { - var headers map[string][]string + var distErr error + + headers := map[string][]string{ + "version": {cli.version}, + } if options.EncodedRegistryAuth != "" { - headers = map[string][]string{ - "X-Registry-Auth": {options.EncodedRegistryAuth}, + headers["X-Registry-Auth"] = []string{options.EncodedRegistryAuth} + } + + // Make sure containerSpec is not nil when no runtime is set or the runtime is set to container + if service.TaskTemplate.ContainerSpec == nil && (service.TaskTemplate.Runtime == "" || service.TaskTemplate.Runtime == swarm.RuntimeContainer) { + service.TaskTemplate.ContainerSpec = &swarm.ContainerSpec{} + } + + if err := validateServiceSpec(service); err != nil { + return types.ServiceCreateResponse{}, err + } + + // ensure that the image is tagged + var imgPlatforms []swarm.Platform + if service.TaskTemplate.ContainerSpec != nil { + if taggedImg := imageWithTagString(service.TaskTemplate.ContainerSpec.Image); taggedImg != "" { + service.TaskTemplate.ContainerSpec.Image = taggedImg + } + if options.QueryRegistry { + var img string + img, imgPlatforms, distErr = imageDigestAndPlatforms(ctx, cli, service.TaskTemplate.ContainerSpec.Image, options.EncodedRegistryAuth) + if img != "" { + service.TaskTemplate.ContainerSpec.Image = img + } } } + // ensure that the image is tagged + if service.TaskTemplate.PluginSpec != nil { + if taggedImg := imageWithTagString(service.TaskTemplate.PluginSpec.Remote); taggedImg != "" { + service.TaskTemplate.PluginSpec.Remote = taggedImg + } + if options.QueryRegistry { + var img string + img, imgPlatforms, distErr = imageDigestAndPlatforms(ctx, cli, service.TaskTemplate.PluginSpec.Remote, options.EncodedRegistryAuth) + if img != "" { + service.TaskTemplate.PluginSpec.Remote = img + } + } + } + + if service.TaskTemplate.Placement == nil && len(imgPlatforms) > 0 { + service.TaskTemplate.Placement = &swarm.Placement{} + } + if len(imgPlatforms) > 0 { + service.TaskTemplate.Placement.Platforms = imgPlatforms + } + var response types.ServiceCreateResponse resp, err := cli.post(ctx, "/services/create", nil, service, headers) if err != nil { @@ -25,6 +77,90 @@ func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec, } err = json.NewDecoder(resp.body).Decode(&response) + + if distErr != nil { + response.Warnings = append(response.Warnings, digestWarning(service.TaskTemplate.ContainerSpec.Image)) + } + ensureReaderClosed(resp) return response, err } + +func imageDigestAndPlatforms(ctx context.Context, cli DistributionAPIClient, image, encodedAuth string) (string, []swarm.Platform, error) { + distributionInspect, err := cli.DistributionInspect(ctx, image, encodedAuth) + var platforms []swarm.Platform + if err != nil { + return "", nil, err + } + + imageWithDigest := imageWithDigestString(image, distributionInspect.Descriptor.Digest) + + if len(distributionInspect.Platforms) > 0 { + platforms = make([]swarm.Platform, 0, len(distributionInspect.Platforms)) + for _, p := range distributionInspect.Platforms { + // clear architecture field for arm. This is a temporary patch to address + // https://github.com/docker/swarmkit/issues/2294. The issue is that while + // image manifests report "arm" as the architecture, the node reports + // something like "armv7l" (includes the variant), which causes arm images + // to stop working with swarm mode. 
This patch removes the architecture + // constraint for arm images to ensure tasks get scheduled. + arch := p.Architecture + if strings.ToLower(arch) == "arm" { + arch = "" + } + platforms = append(platforms, swarm.Platform{ + Architecture: arch, + OS: p.OS, + }) + } + } + return imageWithDigest, platforms, err +} + +// imageWithDigestString takes an image string and a digest, and updates +// the image string if it didn't originally contain a digest. It returns +// an empty string if there are no updates. +func imageWithDigestString(image string, dgst digest.Digest) string { + namedRef, err := reference.ParseNormalizedNamed(image) + if err == nil { + if _, isCanonical := namedRef.(reference.Canonical); !isCanonical { + // ensure that image gets a default tag if none is provided + img, err := reference.WithDigest(namedRef, dgst) + if err == nil { + return reference.FamiliarString(img) + } + } + } + return "" +} + +// imageWithTagString takes an image string, and returns a tagged image +// string, adding a 'latest' tag if one was not provided. It returns an +// empty string if a canonical reference was provided +func imageWithTagString(image string) string { + namedRef, err := reference.ParseNormalizedNamed(image) + if err == nil { + return reference.FamiliarString(reference.TagNameOnly(namedRef)) + } + return "" +} + +// digestWarning constructs a formatted warning string using the +// image name that could not be pinned by digest. The formatting +// is hardcoded, but could me made smarter in the future +func digestWarning(image string) string { + return fmt.Sprintf("image %s could not be accessed on a registry to record\nits digest. Each node will access %s independently,\npossibly leading to different nodes running different\nversions of the image.\n", image, image) +} + +func validateServiceSpec(s swarm.ServiceSpec) error { + if s.TaskTemplate.ContainerSpec != nil && s.TaskTemplate.PluginSpec != nil { + return errors.New("must not specify both a container spec and a plugin spec in the task template") + } + if s.TaskTemplate.PluginSpec != nil && s.TaskTemplate.Runtime != swarm.RuntimePlugin { + return errors.New("mismatched runtime with plugin spec") + } + if s.TaskTemplate.ContainerSpec != nil && (s.TaskTemplate.Runtime != "" && s.TaskTemplate.Runtime != swarm.RuntimeContainer) { + return errors.New("mismatched runtime with container spec") + } + return nil +} diff --git a/vendor/github.com/docker/docker/client/service_inspect.go b/vendor/github.com/docker/docker/client/service_inspect.go index ca71cbd..de6aa22 100644 --- a/vendor/github.com/docker/docker/client/service_inspect.go +++ b/vendor/github.com/docker/docker/client/service_inspect.go @@ -1,23 +1,27 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "encoding/json" + "fmt" "io/ioutil" - "net/http" + "net/url" + "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" ) // ServiceInspectWithRaw returns the service information and the raw data. 
-func (cli *Client) ServiceInspectWithRaw(ctx context.Context, serviceID string) (swarm.Service, []byte, error) { - serverResp, err := cli.get(ctx, "/services/"+serviceID, nil, nil) +func (cli *Client) ServiceInspectWithRaw(ctx context.Context, serviceID string, opts types.ServiceInspectOptions) (swarm.Service, []byte, error) { + if serviceID == "" { + return swarm.Service{}, nil, objectNotFoundError{object: "service", id: serviceID} + } + query := url.Values{} + query.Set("insertDefaults", fmt.Sprintf("%v", opts.InsertDefaults)) + serverResp, err := cli.get(ctx, "/services/"+serviceID, query, nil) if err != nil { - if serverResp.statusCode == http.StatusNotFound { - return swarm.Service{}, nil, serviceNotFoundError{serviceID} - } - return swarm.Service{}, nil, err + return swarm.Service{}, nil, wrapResponseError(err, serverResp, "service", serviceID) } defer ensureReaderClosed(serverResp) diff --git a/vendor/github.com/docker/docker/client/service_list.go b/vendor/github.com/docker/docker/client/service_list.go index c29e6d4..7d53e2b 100644 --- a/vendor/github.com/docker/docker/client/service_list.go +++ b/vendor/github.com/docker/docker/client/service_list.go @@ -1,13 +1,13 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "net/url" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" ) // ServiceList returns the list of services. @@ -15,7 +15,7 @@ func (cli *Client) ServiceList(ctx context.Context, options types.ServiceListOpt query := url.Values{} if options.Filters.Len() > 0 { - filterJSON, err := filters.ToParam(options.Filters) + filterJSON, err := filters.ToJSON(options.Filters) if err != nil { return nil, err } diff --git a/vendor/github.com/docker/docker/client/service_logs.go b/vendor/github.com/docker/docker/client/service_logs.go index 24384e3..906fd40 100644 --- a/vendor/github.com/docker/docker/client/service_logs.go +++ b/vendor/github.com/docker/docker/client/service_logs.go @@ -1,14 +1,14 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "io" "net/url" "time" - "golang.org/x/net/context" - "github.com/docker/docker/api/types" timetypes "github.com/docker/docker/api/types/time" + "github.com/pkg/errors" ) // ServiceLogs returns the logs generated by a service in an io.ReadCloser. @@ -26,7 +26,7 @@ func (cli *Client) ServiceLogs(ctx context.Context, serviceID string, options ty if options.Since != "" { ts, err := timetypes.GetTimestamp(options.Since, time.Now()) if err != nil { - return nil, err + return nil, errors.Wrap(err, `invalid value for "since"`) } query.Set("since", ts) } diff --git a/vendor/github.com/docker/docker/client/service_remove.go b/vendor/github.com/docker/docker/client/service_remove.go index a9331f9..fe3421b 100644 --- a/vendor/github.com/docker/docker/client/service_remove.go +++ b/vendor/github.com/docker/docker/client/service_remove.go @@ -1,10 +1,10 @@ -package client +package client // import "github.com/docker/docker/client" -import "golang.org/x/net/context" +import "context" // ServiceRemove kills and removes a service. 
func (cli *Client) ServiceRemove(ctx context.Context, serviceID string) error { resp, err := cli.delete(ctx, "/services/"+serviceID, nil, nil) ensureReaderClosed(resp) - return err + return wrapResponseError(err, resp, "service", serviceID) } diff --git a/vendor/github.com/docker/docker/client/service_update.go b/vendor/github.com/docker/docker/client/service_update.go index afa94d4..5a7a61b 100644 --- a/vendor/github.com/docker/docker/client/service_update.go +++ b/vendor/github.com/docker/docker/client/service_update.go @@ -1,34 +1,80 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "net/url" "strconv" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" ) // ServiceUpdate updates a Service. func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error) { var ( - headers map[string][]string query = url.Values{} + distErr error ) + headers := map[string][]string{ + "version": {cli.version}, + } + if options.EncodedRegistryAuth != "" { - headers = map[string][]string{ - "X-Registry-Auth": {options.EncodedRegistryAuth}, - } + headers["X-Registry-Auth"] = []string{options.EncodedRegistryAuth} } if options.RegistryAuthFrom != "" { query.Set("registryAuthFrom", options.RegistryAuthFrom) } + if options.Rollback != "" { + query.Set("rollback", options.Rollback) + } + query.Set("version", strconv.FormatUint(version.Index, 10)) + if err := validateServiceSpec(service); err != nil { + return types.ServiceUpdateResponse{}, err + } + + var imgPlatforms []swarm.Platform + // ensure that the image is tagged + if service.TaskTemplate.ContainerSpec != nil { + if taggedImg := imageWithTagString(service.TaskTemplate.ContainerSpec.Image); taggedImg != "" { + service.TaskTemplate.ContainerSpec.Image = taggedImg + } + if options.QueryRegistry { + var img string + img, imgPlatforms, distErr = imageDigestAndPlatforms(ctx, cli, service.TaskTemplate.ContainerSpec.Image, options.EncodedRegistryAuth) + if img != "" { + service.TaskTemplate.ContainerSpec.Image = img + } + } + } + + // ensure that the image is tagged + if service.TaskTemplate.PluginSpec != nil { + if taggedImg := imageWithTagString(service.TaskTemplate.PluginSpec.Remote); taggedImg != "" { + service.TaskTemplate.PluginSpec.Remote = taggedImg + } + if options.QueryRegistry { + var img string + img, imgPlatforms, distErr = imageDigestAndPlatforms(ctx, cli, service.TaskTemplate.PluginSpec.Remote, options.EncodedRegistryAuth) + if img != "" { + service.TaskTemplate.PluginSpec.Remote = img + } + } + } + + if service.TaskTemplate.Placement == nil && len(imgPlatforms) > 0 { + service.TaskTemplate.Placement = &swarm.Placement{} + } + if len(imgPlatforms) > 0 { + service.TaskTemplate.Placement.Platforms = imgPlatforms + } + var response types.ServiceUpdateResponse resp, err := cli.post(ctx, "/services/"+serviceID+"/update", query, service, headers) if err != nil { @@ -36,6 +82,11 @@ func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version } err = json.NewDecoder(resp.body).Decode(&response) + + if distErr != nil { + response.Warnings = append(response.Warnings, digestWarning(service.TaskTemplate.ContainerSpec.Image)) + } + ensureReaderClosed(resp) return response, err } diff --git a/vendor/github.com/docker/docker/client/session.go b/vendor/github.com/docker/docker/client/session.go new 
file mode 100644 index 0000000..c247123 --- /dev/null +++ b/vendor/github.com/docker/docker/client/session.go @@ -0,0 +1,18 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net" + "net/http" +) + +// DialSession returns a connection that can be used communication with daemon +func (cli *Client) DialSession(ctx context.Context, proto string, meta map[string][]string) (net.Conn, error) { + req, err := http.NewRequest("POST", "/session", nil) + if err != nil { + return nil, err + } + req = cli.addHeaders(req, meta) + + return cli.setupHijackConn(req, proto) +} diff --git a/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go b/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go index be28d32..0c50c01 100644 --- a/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go +++ b/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go @@ -1,10 +1,10 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // SwarmGetUnlockKey retrieves the swarm's unlock key. diff --git a/vendor/github.com/docker/docker/client/swarm_init.go b/vendor/github.com/docker/docker/client/swarm_init.go index fd45d06..742ca0f 100644 --- a/vendor/github.com/docker/docker/client/swarm_init.go +++ b/vendor/github.com/docker/docker/client/swarm_init.go @@ -1,13 +1,13 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" ) -// SwarmInit initializes the Swarm. +// SwarmInit initializes the swarm. func (cli *Client) SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error) { serverResp, err := cli.post(ctx, "/swarm/init", nil, req, nil) if err != nil { diff --git a/vendor/github.com/docker/docker/client/swarm_inspect.go b/vendor/github.com/docker/docker/client/swarm_inspect.go index 6d95cfc..cfaabb2 100644 --- a/vendor/github.com/docker/docker/client/swarm_inspect.go +++ b/vendor/github.com/docker/docker/client/swarm_inspect.go @@ -1,13 +1,13 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" ) -// SwarmInspect inspects the Swarm. +// SwarmInspect inspects the swarm. func (cli *Client) SwarmInspect(ctx context.Context) (swarm.Swarm, error) { serverResp, err := cli.get(ctx, "/swarm", nil, nil) if err != nil { diff --git a/vendor/github.com/docker/docker/client/swarm_join.go b/vendor/github.com/docker/docker/client/swarm_join.go index cda9993..a1cf045 100644 --- a/vendor/github.com/docker/docker/client/swarm_join.go +++ b/vendor/github.com/docker/docker/client/swarm_join.go @@ -1,11 +1,12 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" + "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" ) -// SwarmJoin joins the Swarm. +// SwarmJoin joins the swarm. 
func (cli *Client) SwarmJoin(ctx context.Context, req swarm.JoinRequest) error { resp, err := cli.post(ctx, "/swarm/join", nil, req, nil) ensureReaderClosed(resp) diff --git a/vendor/github.com/docker/docker/client/swarm_leave.go b/vendor/github.com/docker/docker/client/swarm_leave.go index a4df732..90ca84b 100644 --- a/vendor/github.com/docker/docker/client/swarm_leave.go +++ b/vendor/github.com/docker/docker/client/swarm_leave.go @@ -1,12 +1,11 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "net/url" - - "golang.org/x/net/context" ) -// SwarmLeave leaves the Swarm. +// SwarmLeave leaves the swarm. func (cli *Client) SwarmLeave(ctx context.Context, force bool) error { query := url.Values{} if force { diff --git a/vendor/github.com/docker/docker/client/swarm_unlock.go b/vendor/github.com/docker/docker/client/swarm_unlock.go index addfb59..d2412f7 100644 --- a/vendor/github.com/docker/docker/client/swarm_unlock.go +++ b/vendor/github.com/docker/docker/client/swarm_unlock.go @@ -1,17 +1,14 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" + "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" ) -// SwarmUnlock unlockes locked swarm. +// SwarmUnlock unlocks locked swarm. func (cli *Client) SwarmUnlock(ctx context.Context, req swarm.UnlockRequest) error { serverResp, err := cli.post(ctx, "/swarm/unlock", nil, req, nil) - if err != nil { - return err - } - ensureReaderClosed(serverResp) return err } diff --git a/vendor/github.com/docker/docker/client/swarm_update.go b/vendor/github.com/docker/docker/client/swarm_update.go index cc8eeb6..56a5bea 100644 --- a/vendor/github.com/docker/docker/client/swarm_update.go +++ b/vendor/github.com/docker/docker/client/swarm_update.go @@ -1,15 +1,15 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "fmt" "net/url" "strconv" "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" ) -// SwarmUpdate updates the Swarm. +// SwarmUpdate updates the swarm. func (cli *Client) SwarmUpdate(ctx context.Context, version swarm.Version, swarm swarm.Spec, flags swarm.UpdateFlags) error { query := url.Values{} query.Set("version", strconv.FormatUint(version.Index, 10)) diff --git a/vendor/github.com/docker/docker/client/task_inspect.go b/vendor/github.com/docker/docker/client/task_inspect.go index bc8058f..e1c0a73 100644 --- a/vendor/github.com/docker/docker/client/task_inspect.go +++ b/vendor/github.com/docker/docker/client/task_inspect.go @@ -1,24 +1,22 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "encoding/json" "io/ioutil" - "net/http" "github.com/docker/docker/api/types/swarm" - - "golang.org/x/net/context" ) // TaskInspectWithRaw returns the task information and its raw representation.. 
func (cli *Client) TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) { + if taskID == "" { + return swarm.Task{}, nil, objectNotFoundError{object: "task", id: taskID} + } serverResp, err := cli.get(ctx, "/tasks/"+taskID, nil, nil) if err != nil { - if serverResp.statusCode == http.StatusNotFound { - return swarm.Task{}, nil, taskNotFoundError{taskID} - } - return swarm.Task{}, nil, err + return swarm.Task{}, nil, wrapResponseError(err, serverResp, "task", taskID) } defer ensureReaderClosed(serverResp) diff --git a/vendor/github.com/docker/docker/client/task_list.go b/vendor/github.com/docker/docker/client/task_list.go index 66324da..42d20c1 100644 --- a/vendor/github.com/docker/docker/client/task_list.go +++ b/vendor/github.com/docker/docker/client/task_list.go @@ -1,13 +1,13 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "net/url" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" ) // TaskList returns the list of tasks. @@ -15,7 +15,7 @@ func (cli *Client) TaskList(ctx context.Context, options types.TaskListOptions) query := url.Values{} if options.Filters.Len() > 0 { - filterJSON, err := filters.ToParam(options.Filters) + filterJSON, err := filters.ToJSON(options.Filters) if err != nil { return nil, err } diff --git a/vendor/github.com/docker/docker/client/task_logs.go b/vendor/github.com/docker/docker/client/task_logs.go new file mode 100644 index 0000000..6222fab --- /dev/null +++ b/vendor/github.com/docker/docker/client/task_logs.go @@ -0,0 +1,51 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" + "net/url" + "time" + + "github.com/docker/docker/api/types" + timetypes "github.com/docker/docker/api/types/time" +) + +// TaskLogs returns the logs generated by a task in an io.ReadCloser. +// It's up to the caller to close the stream. +func (cli *Client) TaskLogs(ctx context.Context, taskID string, options types.ContainerLogsOptions) (io.ReadCloser, error) { + query := url.Values{} + if options.ShowStdout { + query.Set("stdout", "1") + } + + if options.ShowStderr { + query.Set("stderr", "1") + } + + if options.Since != "" { + ts, err := timetypes.GetTimestamp(options.Since, time.Now()) + if err != nil { + return nil, err + } + query.Set("since", ts) + } + + if options.Timestamps { + query.Set("timestamps", "1") + } + + if options.Details { + query.Set("details", "1") + } + + if options.Follow { + query.Set("follow", "1") + } + query.Set("tail", options.Tail) + + resp, err := cli.get(ctx, "/tasks/"+taskID+"/logs", query, nil) + if err != nil { + return nil, err + } + return resp.body, nil +} diff --git a/vendor/github.com/docker/docker/client/tlsconfig_clone.go b/vendor/github.com/docker/docker/client/tlsconfig_clone.go new file mode 100644 index 0000000..88200e9 --- /dev/null +++ b/vendor/github.com/docker/docker/client/tlsconfig_clone.go @@ -0,0 +1,11 @@ +// +build go1.8 + +package client // import "github.com/docker/docker/client" + +import "crypto/tls" + +// tlsConfigClone returns a clone of tls.Config. This function is provided for +// compatibility for go1.7 that doesn't include this method in stdlib. 
+func tlsConfigClone(c *tls.Config) *tls.Config { + return c.Clone() +} diff --git a/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone_go17.go b/vendor/github.com/docker/docker/client/tlsconfig_clone_go17.go similarity index 86% rename from vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone_go17.go rename to vendor/github.com/docker/docker/client/tlsconfig_clone_go17.go index 0d5b448..e298542 100644 --- a/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone_go17.go +++ b/vendor/github.com/docker/docker/client/tlsconfig_clone_go17.go @@ -1,12 +1,12 @@ // +build go1.7,!go1.8 -package tlsconfig +package client // import "github.com/docker/docker/client" import "crypto/tls" -// Clone returns a clone of tls.Config. This function is provided for +// tlsConfigClone returns a clone of tls.Config. This function is provided for // compatibility for go1.7 that doesn't include this method in stdlib. -func Clone(c *tls.Config) *tls.Config { +func tlsConfigClone(c *tls.Config) *tls.Config { return &tls.Config{ Rand: c.Rand, Time: c.Time, diff --git a/vendor/github.com/docker/docker/client/transport.go b/vendor/github.com/docker/docker/client/transport.go index f04e601..5541344 100644 --- a/vendor/github.com/docker/docker/client/transport.go +++ b/vendor/github.com/docker/docker/client/transport.go @@ -1,22 +1,11 @@ -package client +package client // import "github.com/docker/docker/client" import ( "crypto/tls" - "errors" "net/http" ) -var errTLSConfigUnavailable = errors.New("TLSConfig unavailable") - -// transportFunc allows us to inject a mock transport for testing. We define it -// here so we can detect the tlsconfig and return nil for only this type. -type transportFunc func(*http.Request) (*http.Response, error) - -func (tf transportFunc) RoundTrip(req *http.Request) (*http.Response, error) { - return tf(req) -} - -// resolveTLSConfig attempts to resolve the tls configuration from the +// resolveTLSConfig attempts to resolve the TLS configuration from the // RoundTripper. func resolveTLSConfig(transport http.RoundTripper) *tls.Config { switch tr := transport.(type) { diff --git a/vendor/github.com/docker/docker/client/utils.go b/vendor/github.com/docker/docker/client/utils.go index 23d520e..7f3ff44 100644 --- a/vendor/github.com/docker/docker/client/utils.go +++ b/vendor/github.com/docker/docker/client/utils.go @@ -1,9 +1,10 @@ -package client +package client // import "github.com/docker/docker/client" import ( - "github.com/docker/docker/api/types/filters" "net/url" "regexp" + + "github.com/docker/docker/api/types/filters" ) var headerRegexp = regexp.MustCompile(`\ADocker/.+\s\((.+)\)\z`) @@ -23,7 +24,7 @@ func getDockerOS(serverHeader string) string { func getFiltersQuery(f filters.Args) (url.Values, error) { query := url.Values{} if f.Len() > 0 { - filterJSON, err := filters.ToParam(f) + filterJSON, err := filters.ToJSON(f) if err != nil { return query, err } diff --git a/vendor/github.com/docker/docker/client/version.go b/vendor/github.com/docker/docker/client/version.go index 933ceb4..1989f6d 100644 --- a/vendor/github.com/docker/docker/client/version.go +++ b/vendor/github.com/docker/docker/client/version.go @@ -1,10 +1,10 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // ServerVersion returns information of the docker client and server host. 
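The change repeated across the client files above is the move from golang.org/x/net/context to the standard library context package, alongside canonical import comments and centralized error handling in checkResponseErr. A minimal sketch of caller code against the updated client, assuming a reachable daemon; the use of NewEnvClient and context.Background here is illustrative, not taken from this diff:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/docker/client"
)

func main() {
	// NewEnvClient configures the client from DOCKER_HOST and friends.
	cli, err := client.NewEnvClient()
	if err != nil {
		log.Fatal(err)
	}
	// The stdlib context now flows through every call; Ping additionally
	// reports the OS-Type header after this update.
	ping, err := cli.Ping(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("API %s, experimental=%v, os=%s\n", ping.APIVersion, ping.Experimental, ping.OSType)
}

Because non-2xx/3xx responses are now surfaced by checkResponseErr inside sendRequest, callers no longer see partially-populated responses on daemon errors.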
diff --git a/vendor/github.com/docker/docker/client/volume_create.go b/vendor/github.com/docker/docker/client/volume_create.go index 9620c87..f1f6fcd 100644 --- a/vendor/github.com/docker/docker/client/volume_create.go +++ b/vendor/github.com/docker/docker/client/volume_create.go @@ -1,15 +1,15 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "github.com/docker/docker/api/types" volumetypes "github.com/docker/docker/api/types/volume" - "golang.org/x/net/context" ) // VolumeCreate creates a volume in the docker host. -func (cli *Client) VolumeCreate(ctx context.Context, options volumetypes.VolumesCreateBody) (types.Volume, error) { +func (cli *Client) VolumeCreate(ctx context.Context, options volumetypes.VolumeCreateBody) (types.Volume, error) { var volume types.Volume resp, err := cli.post(ctx, "/volumes/create", nil, options, nil) if err != nil { diff --git a/vendor/github.com/docker/docker/client/volume_inspect.go b/vendor/github.com/docker/docker/client/volume_inspect.go index 3860e9b..f840682 100644 --- a/vendor/github.com/docker/docker/client/volume_inspect.go +++ b/vendor/github.com/docker/docker/client/volume_inspect.go @@ -1,13 +1,12 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "encoding/json" "io/ioutil" - "net/http" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // VolumeInspect returns the information about a specific volume in the docker host. @@ -18,13 +17,14 @@ func (cli *Client) VolumeInspect(ctx context.Context, volumeID string) (types.Vo // VolumeInspectWithRaw returns the information about a specific volume in the docker host and its raw representation func (cli *Client) VolumeInspectWithRaw(ctx context.Context, volumeID string) (types.Volume, []byte, error) { + if volumeID == "" { + return types.Volume{}, nil, objectNotFoundError{object: "volume", id: volumeID} + } + var volume types.Volume resp, err := cli.get(ctx, "/volumes/"+volumeID, nil, nil) if err != nil { - if resp.statusCode == http.StatusNotFound { - return volume, nil, volumeNotFoundError{volumeID} - } - return volume, nil, err + return volume, nil, wrapResponseError(err, resp, "volume", volumeID) } defer ensureReaderClosed(resp) diff --git a/vendor/github.com/docker/docker/client/volume_list.go b/vendor/github.com/docker/docker/client/volume_list.go index 32247ce..284554d 100644 --- a/vendor/github.com/docker/docker/client/volume_list.go +++ b/vendor/github.com/docker/docker/client/volume_list.go @@ -1,17 +1,17 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "net/url" "github.com/docker/docker/api/types/filters" volumetypes "github.com/docker/docker/api/types/volume" - "golang.org/x/net/context" ) // VolumeList returns the volumes configured in the docker host. 
-func (cli *Client) VolumeList(ctx context.Context, filter filters.Args) (volumetypes.VolumesListOKBody, error) { - var volumes volumetypes.VolumesListOKBody +func (cli *Client) VolumeList(ctx context.Context, filter filters.Args) (volumetypes.VolumeListOKBody, error) { + var volumes volumetypes.VolumeListOKBody query := url.Values{} if filter.Len() > 0 { diff --git a/vendor/github.com/docker/docker/client/volume_prune.go b/vendor/github.com/docker/docker/client/volume_prune.go index a07e4ce..70041ef 100644 --- a/vendor/github.com/docker/docker/client/volume_prune.go +++ b/vendor/github.com/docker/docker/client/volume_prune.go @@ -1,12 +1,12 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "fmt" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" - "golang.org/x/net/context" ) // VolumesPrune requests the daemon to delete unused data @@ -29,7 +29,7 @@ func (cli *Client) VolumesPrune(ctx context.Context, pruneFilters filters.Args) defer ensureReaderClosed(serverResp) if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { - return report, fmt.Errorf("Error retrieving disk usage: %v", err) + return report, fmt.Errorf("Error retrieving volume prune report: %v", err) } return report, nil diff --git a/vendor/github.com/docker/docker/client/volume_remove.go b/vendor/github.com/docker/docker/client/volume_remove.go index 6c26575..fc5a71d 100644 --- a/vendor/github.com/docker/docker/client/volume_remove.go +++ b/vendor/github.com/docker/docker/client/volume_remove.go @@ -1,10 +1,10 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "net/url" "github.com/docker/docker/api/types/versions" - "golang.org/x/net/context" ) // VolumeRemove removes a volume from the docker host. @@ -17,5 +17,5 @@ func (cli *Client) VolumeRemove(ctx context.Context, volumeID string, force bool } resp, err := cli.delete(ctx, "/volumes/"+volumeID, query, nil) ensureReaderClosed(resp) - return err + return wrapResponseError(err, resp, "volume", volumeID) } diff --git a/vendor/github.com/docker/docker/contrib/README.md b/vendor/github.com/docker/docker/contrib/README.md new file mode 100644 index 0000000..92b1d94 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/README.md @@ -0,0 +1,4 @@ +The `contrib` directory contains scripts, images, and other helpful things +which are not part of the core docker distribution. Please note that they +could be out of date, since they do not receive the same attention as the +rest of the repository. 
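The volume hunks above rename the generated body types (VolumesCreateBody becomes VolumeCreateBody, VolumesListOKBody becomes VolumeListOKBody) and route list filters through filters.ToJSON, so downstream code needs a mechanical rebuild against the new names. A hedged sketch of a list-and-remove loop under the updated signatures; the dangling filter value is an illustrative assumption:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()

	// Filters are serialized with filters.ToJSON inside the client now.
	f := filters.NewArgs()
	f.Add("dangling", "true")

	// VolumeList returns volumetypes.VolumeListOKBody after the rename.
	body, err := cli.VolumeList(ctx, f)
	if err != nil {
		log.Fatal(err)
	}
	for _, v := range body.Volumes {
		fmt.Println("removing volume", v.Name)
		// VolumeRemove now wraps 404s via wrapResponseError, so missing
		// volumes surface as typed not-found errors rather than raw
		// daemon messages.
		if err := cli.VolumeRemove(ctx, v.Name, false); err != nil {
			log.Println(err)
		}
	}
}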
diff --git a/vendor/github.com/docker/docker/contrib/nnp-test/nnp-test.c b/vendor/github.com/docker/docker/contrib/nnp-test/nnp-test.c new file mode 100644 index 0000000..b767da7 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/nnp-test/nnp-test.c @@ -0,0 +1,10 @@ +#include <stdio.h> +#include <sys/types.h> +#include <unistd.h> + +int main(int argc, char *argv[]) +{ + printf("EUID=%d\n", geteuid()); + return 0; +} + diff --git a/vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/LICENSE b/vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/LICENSE deleted file mode 100644 index d511905..0000000 --- a/vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/LICENSE +++ /dev/null @@ -1,339 +0,0 @@ - GNU GENERAL PUBLIC LICENSE - Version 2, June 1991 - - Copyright (C) 1989, 1991 Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The licenses for most software are designed to take away your -freedom to share and change it. By contrast, the GNU General Public -License is intended to guarantee your freedom to share and change free -software--to make sure the software is free for all its users. This -General Public License applies to most of the Free Software -Foundation's software and to any other program whose authors commit to -using it. (Some other Free Software Foundation software is covered by -the GNU Lesser General Public License instead.) You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -this service if you wish), that you receive source code or can get it -if you want it, that you can change the software or use pieces of it -in new free programs; and that you know you can do these things. - - To protect your rights, we need to make restrictions that forbid -anyone to deny you these rights or to ask you to surrender the rights. -These restrictions translate to certain responsibilities for you if you -distribute copies of the software, or if you modify it. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must give the recipients all the rights that -you have. You must make sure that they, too, receive or can get the -source code. And you must show them these terms so they know their -rights. - - We protect your rights with two steps: (1) copyright the software, and -(2) offer you this license which gives you legal permission to copy, -distribute and/or modify the software. - - Also, for each author's protection and ours, we want to make certain -that everyone understands that there is no warranty for this free -software. If the software is modified by someone else and passed on, we -want its recipients to know that what they have is not the original, so -that any problems introduced by others will not reflect on the original -authors' reputations. - - Finally, any free program is threatened constantly by software -patents. We wish to avoid the danger that redistributors of a free -program will individually obtain patent licenses, in effect making the -program proprietary. To prevent this, we have made it clear that any -patent must be licensed for everyone's free use or not licensed at all.
- - The precise terms and conditions for copying, distribution and -modification follow. - - GNU GENERAL PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. This License applies to any program or other work which contains -a notice placed by the copyright holder saying it may be distributed -under the terms of this General Public License. The "Program", below, -refers to any such program or work, and a "work based on the Program" -means either the Program or any derivative work under copyright law: -that is to say, a work containing the Program or a portion of it, -either verbatim or with modifications and/or translated into another -language. (Hereinafter, translation is included without limitation in -the term "modification".) Each licensee is addressed as "you". - -Activities other than copying, distribution and modification are not -covered by this License; they are outside its scope. The act of -running the Program is not restricted, and the output from the Program -is covered only if its contents constitute a work based on the -Program (independent of having been made by running the Program). -Whether that is true depends on what the Program does. - - 1. You may copy and distribute verbatim copies of the Program's -source code as you receive it, in any medium, provided that you -conspicuously and appropriately publish on each copy an appropriate -copyright notice and disclaimer of warranty; keep intact all the -notices that refer to this License and to the absence of any warranty; -and give any other recipients of the Program a copy of this License -along with the Program. - -You may charge a fee for the physical act of transferring a copy, and -you may at your option offer warranty protection in exchange for a fee. - - 2. You may modify your copy or copies of the Program or any portion -of it, thus forming a work based on the Program, and copy and -distribute such modifications or work under the terms of Section 1 -above, provided that you also meet all of these conditions: - - a) You must cause the modified files to carry prominent notices - stating that you changed the files and the date of any change. - - b) You must cause any work that you distribute or publish, that in - whole or in part contains or is derived from the Program or any - part thereof, to be licensed as a whole at no charge to all third - parties under the terms of this License. - - c) If the modified program normally reads commands interactively - when run, you must cause it, when started running for such - interactive use in the most ordinary way, to print or display an - announcement including an appropriate copyright notice and a - notice that there is no warranty (or else, saying that you provide - a warranty) and that users may redistribute the program under - these conditions, and telling the user how to view a copy of this - License. (Exception: if the Program itself is interactive but - does not normally print such an announcement, your work based on - the Program is not required to print an announcement.) - -These requirements apply to the modified work as a whole. If -identifiable sections of that work are not derived from the Program, -and can be reasonably considered independent and separate works in -themselves, then this License, and its terms, do not apply to those -sections when you distribute them as separate works. 
But when you -distribute the same sections as part of a whole which is a work based -on the Program, the distribution of the whole must be on the terms of -this License, whose permissions for other licensees extend to the -entire whole, and thus to each and every part regardless of who wrote it. - -Thus, it is not the intent of this section to claim rights or contest -your rights to work written entirely by you; rather, the intent is to -exercise the right to control the distribution of derivative or -collective works based on the Program. - -In addition, mere aggregation of another work not based on the Program -with the Program (or with a work based on the Program) on a volume of -a storage or distribution medium does not bring the other work under -the scope of this License. - - 3. You may copy and distribute the Program (or a work based on it, -under Section 2) in object code or executable form under the terms of -Sections 1 and 2 above provided that you also do one of the following: - - a) Accompany it with the complete corresponding machine-readable - source code, which must be distributed under the terms of Sections - 1 and 2 above on a medium customarily used for software interchange; or, - - b) Accompany it with a written offer, valid for at least three - years, to give any third party, for a charge no more than your - cost of physically performing source distribution, a complete - machine-readable copy of the corresponding source code, to be - distributed under the terms of Sections 1 and 2 above on a medium - customarily used for software interchange; or, - - c) Accompany it with the information you received as to the offer - to distribute corresponding source code. (This alternative is - allowed only for noncommercial distribution and only if you - received the program in object code or executable form with such - an offer, in accord with Subsection b above.) - -The source code for a work means the preferred form of the work for -making modifications to it. For an executable work, complete source -code means all the source code for all modules it contains, plus any -associated interface definition files, plus the scripts used to -control compilation and installation of the executable. However, as a -special exception, the source code distributed need not include -anything that is normally distributed (in either source or binary -form) with the major components (compiler, kernel, and so on) of the -operating system on which the executable runs, unless that component -itself accompanies the executable. - -If distribution of executable or object code is made by offering -access to copy from a designated place, then offering equivalent -access to copy the source code from the same place counts as -distribution of the source code, even though third parties are not -compelled to copy the source along with the object code. - - 4. You may not copy, modify, sublicense, or distribute the Program -except as expressly provided under this License. Any attempt -otherwise to copy, modify, sublicense or distribute the Program is -void, and will automatically terminate your rights under this License. -However, parties who have received copies, or rights, from you under -this License will not have their licenses terminated so long as such -parties remain in full compliance. - - 5. You are not required to accept this License, since you have not -signed it. However, nothing else grants you permission to modify or -distribute the Program or its derivative works. 
These actions are -prohibited by law if you do not accept this License. Therefore, by -modifying or distributing the Program (or any work based on the -Program), you indicate your acceptance of this License to do so, and -all its terms and conditions for copying, distributing or modifying -the Program or works based on it. - - 6. Each time you redistribute the Program (or any work based on the -Program), the recipient automatically receives a license from the -original licensor to copy, distribute or modify the Program subject to -these terms and conditions. You may not impose any further -restrictions on the recipients' exercise of the rights granted herein. -You are not responsible for enforcing compliance by third parties to -this License. - - 7. If, as a consequence of a court judgment or allegation of patent -infringement or for any other reason (not limited to patent issues), -conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot -distribute so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you -may not distribute the Program at all. For example, if a patent -license would not permit royalty-free redistribution of the Program by -all those who receive copies directly or indirectly through you, then -the only way you could satisfy both it and this License would be to -refrain entirely from distribution of the Program. - -If any portion of this section is held invalid or unenforceable under -any particular circumstance, the balance of the section is intended to -apply and the section as a whole is intended to apply in other -circumstances. - -It is not the purpose of this section to induce you to infringe any -patents or other property right claims or to contest validity of any -such claims; this section has the sole purpose of protecting the -integrity of the free software distribution system, which is -implemented by public license practices. Many people have made -generous contributions to the wide range of software distributed -through that system in reliance on consistent application of that -system; it is up to the author/donor to decide if he or she is willing -to distribute software through any other system and a licensee cannot -impose that choice. - -This section is intended to make thoroughly clear what is believed to -be a consequence of the rest of this License. - - 8. If the distribution and/or use of the Program is restricted in -certain countries either by patents or by copyrighted interfaces, the -original copyright holder who places the Program under this License -may add an explicit geographical distribution limitation excluding -those countries, so that distribution is permitted only in or among -countries not thus excluded. In such case, this License incorporates -the limitation as if written in the body of this License. - - 9. The Free Software Foundation may publish revised and/or new versions -of the General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - -Each version is given a distinguishing version number. 
If the Program
-specifies a version number of this License which applies to it and "any
-later version", you have the option of following the terms and conditions
-either of that version or of any later version published by the Free
-Software Foundation. If the Program does not specify a version number of
-this License, you may choose any version ever published by the Free Software
-Foundation.
-
- 10. If you wish to incorporate parts of the Program into other free
-programs whose distribution conditions are different, write to the author
-to ask for permission. For software which is copyrighted by the Free
-Software Foundation, write to the Free Software Foundation; we sometimes
-make exceptions for this. Our decision will be guided by the two goals
-of preserving the free status of all derivatives of our free software and
-of promoting the sharing and reuse of software generally.
-
- NO WARRANTY
-
- 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
-FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
-OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
-PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
-OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
-TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
-PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
-REPAIR OR CORRECTION.
-
- 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
-REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
-INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
-OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
-TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
-YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
-PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGES.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-convey the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (C) <year> <name of author>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License along
- with this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-
-Also add information on how to contact you by electronic and paper mail.
-
-If the program is interactive, make it output a short notice like this
-when it starts in an interactive mode:
-
- Gnomovision version 69, Copyright (C) year name of author
- Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, the commands you use may
-be called something other than `show w' and `show c'; they could even be
-mouse-clicks or menu items--whatever suits your program.
-
-You should also get your employer (if you work as a programmer) or your
-school, if any, to sign a "copyright disclaimer" for the program, if
-necessary. Here is a sample; alter the names:
-
- Yoyodyne, Inc., hereby disclaims all copyright interest in the program
- `Gnomovision' (which makes passes at compilers) written by James Hacker.
-
- <signature of Ty Coon>, 1 April 1989
- Ty Coon, President of Vice
-
-This General Public License does not permit incorporating your program into
-proprietary programs. If your program is a subroutine library, you may
-consider it more useful to permit linking proprietary applications with the
-library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License.
diff --git a/vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/LICENSE b/vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/LICENSE
deleted file mode 100644
index d511905..0000000
--- a/vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/LICENSE
+++ /dev/null
@@ -1,339 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 2, June 1991
-
- Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
- 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The licenses for most software are designed to take away your
-freedom to share and change it. By contrast, the GNU General Public
-License is intended to guarantee your freedom to share and change free
-software--to make sure the software is free for all its users. This
-General Public License applies to most of the Free Software
-Foundation's software and to any other program whose authors commit to
-using it. (Some other Free Software Foundation software is covered by
-the GNU Lesser General Public License instead.) You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-this service if you wish), that you receive source code or can get it
-if you want it, that you can change the software or use pieces of it
-in new free programs; and that you know you can do these things.
-
- To protect your rights, we need to make restrictions that forbid
-anyone to deny you these rights or to ask you to surrender the rights.
-These restrictions translate to certain responsibilities for you if you
-distribute copies of the software, or if you modify it.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must give the recipients all the rights that
-you have. You must make sure that they, too, receive or can get the
-source code.
And you must show them these terms so they know their -rights. - - We protect your rights with two steps: (1) copyright the software, and -(2) offer you this license which gives you legal permission to copy, -distribute and/or modify the software. - - Also, for each author's protection and ours, we want to make certain -that everyone understands that there is no warranty for this free -software. If the software is modified by someone else and passed on, we -want its recipients to know that what they have is not the original, so -that any problems introduced by others will not reflect on the original -authors' reputations. - - Finally, any free program is threatened constantly by software -patents. We wish to avoid the danger that redistributors of a free -program will individually obtain patent licenses, in effect making the -program proprietary. To prevent this, we have made it clear that any -patent must be licensed for everyone's free use or not licensed at all. - - The precise terms and conditions for copying, distribution and -modification follow. - - GNU GENERAL PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. This License applies to any program or other work which contains -a notice placed by the copyright holder saying it may be distributed -under the terms of this General Public License. The "Program", below, -refers to any such program or work, and a "work based on the Program" -means either the Program or any derivative work under copyright law: -that is to say, a work containing the Program or a portion of it, -either verbatim or with modifications and/or translated into another -language. (Hereinafter, translation is included without limitation in -the term "modification".) Each licensee is addressed as "you". - -Activities other than copying, distribution and modification are not -covered by this License; they are outside its scope. The act of -running the Program is not restricted, and the output from the Program -is covered only if its contents constitute a work based on the -Program (independent of having been made by running the Program). -Whether that is true depends on what the Program does. - - 1. You may copy and distribute verbatim copies of the Program's -source code as you receive it, in any medium, provided that you -conspicuously and appropriately publish on each copy an appropriate -copyright notice and disclaimer of warranty; keep intact all the -notices that refer to this License and to the absence of any warranty; -and give any other recipients of the Program a copy of this License -along with the Program. - -You may charge a fee for the physical act of transferring a copy, and -you may at your option offer warranty protection in exchange for a fee. - - 2. You may modify your copy or copies of the Program or any portion -of it, thus forming a work based on the Program, and copy and -distribute such modifications or work under the terms of Section 1 -above, provided that you also meet all of these conditions: - - a) You must cause the modified files to carry prominent notices - stating that you changed the files and the date of any change. - - b) You must cause any work that you distribute or publish, that in - whole or in part contains or is derived from the Program or any - part thereof, to be licensed as a whole at no charge to all third - parties under the terms of this License. 
- - c) If the modified program normally reads commands interactively - when run, you must cause it, when started running for such - interactive use in the most ordinary way, to print or display an - announcement including an appropriate copyright notice and a - notice that there is no warranty (or else, saying that you provide - a warranty) and that users may redistribute the program under - these conditions, and telling the user how to view a copy of this - License. (Exception: if the Program itself is interactive but - does not normally print such an announcement, your work based on - the Program is not required to print an announcement.) - -These requirements apply to the modified work as a whole. If -identifiable sections of that work are not derived from the Program, -and can be reasonably considered independent and separate works in -themselves, then this License, and its terms, do not apply to those -sections when you distribute them as separate works. But when you -distribute the same sections as part of a whole which is a work based -on the Program, the distribution of the whole must be on the terms of -this License, whose permissions for other licensees extend to the -entire whole, and thus to each and every part regardless of who wrote it. - -Thus, it is not the intent of this section to claim rights or contest -your rights to work written entirely by you; rather, the intent is to -exercise the right to control the distribution of derivative or -collective works based on the Program. - -In addition, mere aggregation of another work not based on the Program -with the Program (or with a work based on the Program) on a volume of -a storage or distribution medium does not bring the other work under -the scope of this License. - - 3. You may copy and distribute the Program (or a work based on it, -under Section 2) in object code or executable form under the terms of -Sections 1 and 2 above provided that you also do one of the following: - - a) Accompany it with the complete corresponding machine-readable - source code, which must be distributed under the terms of Sections - 1 and 2 above on a medium customarily used for software interchange; or, - - b) Accompany it with a written offer, valid for at least three - years, to give any third party, for a charge no more than your - cost of physically performing source distribution, a complete - machine-readable copy of the corresponding source code, to be - distributed under the terms of Sections 1 and 2 above on a medium - customarily used for software interchange; or, - - c) Accompany it with the information you received as to the offer - to distribute corresponding source code. (This alternative is - allowed only for noncommercial distribution and only if you - received the program in object code or executable form with such - an offer, in accord with Subsection b above.) - -The source code for a work means the preferred form of the work for -making modifications to it. For an executable work, complete source -code means all the source code for all modules it contains, plus any -associated interface definition files, plus the scripts used to -control compilation and installation of the executable. However, as a -special exception, the source code distributed need not include -anything that is normally distributed (in either source or binary -form) with the major components (compiler, kernel, and so on) of the -operating system on which the executable runs, unless that component -itself accompanies the executable. 
- -If distribution of executable or object code is made by offering -access to copy from a designated place, then offering equivalent -access to copy the source code from the same place counts as -distribution of the source code, even though third parties are not -compelled to copy the source along with the object code. - - 4. You may not copy, modify, sublicense, or distribute the Program -except as expressly provided under this License. Any attempt -otherwise to copy, modify, sublicense or distribute the Program is -void, and will automatically terminate your rights under this License. -However, parties who have received copies, or rights, from you under -this License will not have their licenses terminated so long as such -parties remain in full compliance. - - 5. You are not required to accept this License, since you have not -signed it. However, nothing else grants you permission to modify or -distribute the Program or its derivative works. These actions are -prohibited by law if you do not accept this License. Therefore, by -modifying or distributing the Program (or any work based on the -Program), you indicate your acceptance of this License to do so, and -all its terms and conditions for copying, distributing or modifying -the Program or works based on it. - - 6. Each time you redistribute the Program (or any work based on the -Program), the recipient automatically receives a license from the -original licensor to copy, distribute or modify the Program subject to -these terms and conditions. You may not impose any further -restrictions on the recipients' exercise of the rights granted herein. -You are not responsible for enforcing compliance by third parties to -this License. - - 7. If, as a consequence of a court judgment or allegation of patent -infringement or for any other reason (not limited to patent issues), -conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot -distribute so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you -may not distribute the Program at all. For example, if a patent -license would not permit royalty-free redistribution of the Program by -all those who receive copies directly or indirectly through you, then -the only way you could satisfy both it and this License would be to -refrain entirely from distribution of the Program. - -If any portion of this section is held invalid or unenforceable under -any particular circumstance, the balance of the section is intended to -apply and the section as a whole is intended to apply in other -circumstances. - -It is not the purpose of this section to induce you to infringe any -patents or other property right claims or to contest validity of any -such claims; this section has the sole purpose of protecting the -integrity of the free software distribution system, which is -implemented by public license practices. Many people have made -generous contributions to the wide range of software distributed -through that system in reliance on consistent application of that -system; it is up to the author/donor to decide if he or she is willing -to distribute software through any other system and a licensee cannot -impose that choice. - -This section is intended to make thoroughly clear what is believed to -be a consequence of the rest of this License. - - 8. 
If the distribution and/or use of the Program is restricted in -certain countries either by patents or by copyrighted interfaces, the -original copyright holder who places the Program under this License -may add an explicit geographical distribution limitation excluding -those countries, so that distribution is permitted only in or among -countries not thus excluded. In such case, this License incorporates -the limitation as if written in the body of this License. - - 9. The Free Software Foundation may publish revised and/or new versions -of the General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - -Each version is given a distinguishing version number. If the Program -specifies a version number of this License which applies to it and "any -later version", you have the option of following the terms and conditions -either of that version or of any later version published by the Free -Software Foundation. If the Program does not specify a version number of -this License, you may choose any version ever published by the Free Software -Foundation. - - 10. If you wish to incorporate parts of the Program into other free -programs whose distribution conditions are different, write to the author -to ask for permission. For software which is copyrighted by the Free -Software Foundation, write to the Free Software Foundation; we sometimes -make exceptions for this. Our decision will be guided by the two goals -of preserving the free status of all derivatives of our free software and -of promoting the sharing and reuse of software generally. - - NO WARRANTY - - 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY -FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN -OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES -PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED -OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS -TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE -PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, -REPAIR OR CORRECTION. - - 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR -REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, -INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING -OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED -TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY -YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER -PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE -POSSIBILITY OF SUCH DAMAGES. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -convey the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. 
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (C) <year> <name of author>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License along
- with this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-
-Also add information on how to contact you by electronic and paper mail.
-
-If the program is interactive, make it output a short notice like this
-when it starts in an interactive mode:
-
- Gnomovision version 69, Copyright (C) year name of author
- Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, the commands you use may
-be called something other than `show w' and `show c'; they could even be
-mouse-clicks or menu items--whatever suits your program.
-
-You should also get your employer (if you work as a programmer) or your
-school, if any, to sign a "copyright disclaimer" for the program, if
-necessary. Here is a sample; alter the names:
-
- Yoyodyne, Inc., hereby disclaims all copyright interest in the program
- `Gnomovision' (which makes passes at compilers) written by James Hacker.
-
- <signature of Ty Coon>, 1 April 1989
- Ty Coon, President of Vice
-
-This General Public License does not permit incorporating your program into
-proprietary programs. If your program is a subroutine library, you may
-consider it more useful to permit linking proprietary applications with the
-library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License.
diff --git a/vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/LICENSE b/vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/LICENSE
deleted file mode 100644
index 5b6e7c6..0000000
--- a/vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/LICENSE
+++ /dev/null
@@ -1,340 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 2, June 1991
-
- Copyright (C) 1989, 1991 Free Software Foundation, Inc.
- 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The licenses for most software are designed to take away your
-freedom to share and change it. By contrast, the GNU General Public
-License is intended to guarantee your freedom to share and change free
-software--to make sure the software is free for all its users. This
-General Public License applies to most of the Free Software
-Foundation's software and to any other program whose authors commit to
-using it. (Some other Free Software Foundation software is covered by
-the GNU Library General Public License instead.) You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price.
Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -this service if you wish), that you receive source code or can get it -if you want it, that you can change the software or use pieces of it -in new free programs; and that you know you can do these things. - - To protect your rights, we need to make restrictions that forbid -anyone to deny you these rights or to ask you to surrender the rights. -These restrictions translate to certain responsibilities for you if you -distribute copies of the software, or if you modify it. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must give the recipients all the rights that -you have. You must make sure that they, too, receive or can get the -source code. And you must show them these terms so they know their -rights. - - We protect your rights with two steps: (1) copyright the software, and -(2) offer you this license which gives you legal permission to copy, -distribute and/or modify the software. - - Also, for each author's protection and ours, we want to make certain -that everyone understands that there is no warranty for this free -software. If the software is modified by someone else and passed on, we -want its recipients to know that what they have is not the original, so -that any problems introduced by others will not reflect on the original -authors' reputations. - - Finally, any free program is threatened constantly by software -patents. We wish to avoid the danger that redistributors of a free -program will individually obtain patent licenses, in effect making the -program proprietary. To prevent this, we have made it clear that any -patent must be licensed for everyone's free use or not licensed at all. - - The precise terms and conditions for copying, distribution and -modification follow. - - GNU GENERAL PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. This License applies to any program or other work which contains -a notice placed by the copyright holder saying it may be distributed -under the terms of this General Public License. The "Program", below, -refers to any such program or work, and a "work based on the Program" -means either the Program or any derivative work under copyright law: -that is to say, a work containing the Program or a portion of it, -either verbatim or with modifications and/or translated into another -language. (Hereinafter, translation is included without limitation in -the term "modification".) Each licensee is addressed as "you". - -Activities other than copying, distribution and modification are not -covered by this License; they are outside its scope. The act of -running the Program is not restricted, and the output from the Program -is covered only if its contents constitute a work based on the -Program (independent of having been made by running the Program). -Whether that is true depends on what the Program does. - - 1. You may copy and distribute verbatim copies of the Program's -source code as you receive it, in any medium, provided that you -conspicuously and appropriately publish on each copy an appropriate -copyright notice and disclaimer of warranty; keep intact all the -notices that refer to this License and to the absence of any warranty; -and give any other recipients of the Program a copy of this License -along with the Program. 
- -You may charge a fee for the physical act of transferring a copy, and -you may at your option offer warranty protection in exchange for a fee. - - 2. You may modify your copy or copies of the Program or any portion -of it, thus forming a work based on the Program, and copy and -distribute such modifications or work under the terms of Section 1 -above, provided that you also meet all of these conditions: - - a) You must cause the modified files to carry prominent notices - stating that you changed the files and the date of any change. - - b) You must cause any work that you distribute or publish, that in - whole or in part contains or is derived from the Program or any - part thereof, to be licensed as a whole at no charge to all third - parties under the terms of this License. - - c) If the modified program normally reads commands interactively - when run, you must cause it, when started running for such - interactive use in the most ordinary way, to print or display an - announcement including an appropriate copyright notice and a - notice that there is no warranty (or else, saying that you provide - a warranty) and that users may redistribute the program under - these conditions, and telling the user how to view a copy of this - License. (Exception: if the Program itself is interactive but - does not normally print such an announcement, your work based on - the Program is not required to print an announcement.) - -These requirements apply to the modified work as a whole. If -identifiable sections of that work are not derived from the Program, -and can be reasonably considered independent and separate works in -themselves, then this License, and its terms, do not apply to those -sections when you distribute them as separate works. But when you -distribute the same sections as part of a whole which is a work based -on the Program, the distribution of the whole must be on the terms of -this License, whose permissions for other licensees extend to the -entire whole, and thus to each and every part regardless of who wrote it. - -Thus, it is not the intent of this section to claim rights or contest -your rights to work written entirely by you; rather, the intent is to -exercise the right to control the distribution of derivative or -collective works based on the Program. - -In addition, mere aggregation of another work not based on the Program -with the Program (or with a work based on the Program) on a volume of -a storage or distribution medium does not bring the other work under -the scope of this License. - - 3. You may copy and distribute the Program (or a work based on it, -under Section 2) in object code or executable form under the terms of -Sections 1 and 2 above provided that you also do one of the following: - - a) Accompany it with the complete corresponding machine-readable - source code, which must be distributed under the terms of Sections - 1 and 2 above on a medium customarily used for software interchange; or, - - b) Accompany it with a written offer, valid for at least three - years, to give any third party, for a charge no more than your - cost of physically performing source distribution, a complete - machine-readable copy of the corresponding source code, to be - distributed under the terms of Sections 1 and 2 above on a medium - customarily used for software interchange; or, - - c) Accompany it with the information you received as to the offer - to distribute corresponding source code. 
(This alternative is - allowed only for noncommercial distribution and only if you - received the program in object code or executable form with such - an offer, in accord with Subsection b above.) - -The source code for a work means the preferred form of the work for -making modifications to it. For an executable work, complete source -code means all the source code for all modules it contains, plus any -associated interface definition files, plus the scripts used to -control compilation and installation of the executable. However, as a -special exception, the source code distributed need not include -anything that is normally distributed (in either source or binary -form) with the major components (compiler, kernel, and so on) of the -operating system on which the executable runs, unless that component -itself accompanies the executable. - -If distribution of executable or object code is made by offering -access to copy from a designated place, then offering equivalent -access to copy the source code from the same place counts as -distribution of the source code, even though third parties are not -compelled to copy the source along with the object code. - - 4. You may not copy, modify, sublicense, or distribute the Program -except as expressly provided under this License. Any attempt -otherwise to copy, modify, sublicense or distribute the Program is -void, and will automatically terminate your rights under this License. -However, parties who have received copies, or rights, from you under -this License will not have their licenses terminated so long as such -parties remain in full compliance. - - 5. You are not required to accept this License, since you have not -signed it. However, nothing else grants you permission to modify or -distribute the Program or its derivative works. These actions are -prohibited by law if you do not accept this License. Therefore, by -modifying or distributing the Program (or any work based on the -Program), you indicate your acceptance of this License to do so, and -all its terms and conditions for copying, distributing or modifying -the Program or works based on it. - - 6. Each time you redistribute the Program (or any work based on the -Program), the recipient automatically receives a license from the -original licensor to copy, distribute or modify the Program subject to -these terms and conditions. You may not impose any further -restrictions on the recipients' exercise of the rights granted herein. -You are not responsible for enforcing compliance by third parties to -this License. - - 7. If, as a consequence of a court judgment or allegation of patent -infringement or for any other reason (not limited to patent issues), -conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot -distribute so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you -may not distribute the Program at all. For example, if a patent -license would not permit royalty-free redistribution of the Program by -all those who receive copies directly or indirectly through you, then -the only way you could satisfy both it and this License would be to -refrain entirely from distribution of the Program. 
- -If any portion of this section is held invalid or unenforceable under -any particular circumstance, the balance of the section is intended to -apply and the section as a whole is intended to apply in other -circumstances. - -It is not the purpose of this section to induce you to infringe any -patents or other property right claims or to contest validity of any -such claims; this section has the sole purpose of protecting the -integrity of the free software distribution system, which is -implemented by public license practices. Many people have made -generous contributions to the wide range of software distributed -through that system in reliance on consistent application of that -system; it is up to the author/donor to decide if he or she is willing -to distribute software through any other system and a licensee cannot -impose that choice. - -This section is intended to make thoroughly clear what is believed to -be a consequence of the rest of this License. - - 8. If the distribution and/or use of the Program is restricted in -certain countries either by patents or by copyrighted interfaces, the -original copyright holder who places the Program under this License -may add an explicit geographical distribution limitation excluding -those countries, so that distribution is permitted only in or among -countries not thus excluded. In such case, this License incorporates -the limitation as if written in the body of this License. - - 9. The Free Software Foundation may publish revised and/or new versions -of the General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - -Each version is given a distinguishing version number. If the Program -specifies a version number of this License which applies to it and "any -later version", you have the option of following the terms and conditions -either of that version or of any later version published by the Free -Software Foundation. If the Program does not specify a version number of -this License, you may choose any version ever published by the Free Software -Foundation. - - 10. If you wish to incorporate parts of the Program into other free -programs whose distribution conditions are different, write to the author -to ask for permission. For software which is copyrighted by the Free -Software Foundation, write to the Free Software Foundation; we sometimes -make exceptions for this. Our decision will be guided by the two goals -of preserving the free status of all derivatives of our free software and -of promoting the sharing and reuse of software generally. - - NO WARRANTY - - 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY -FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN -OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES -PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED -OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS -TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE -PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, -REPAIR OR CORRECTION. - - 12. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
-REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
-INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
-OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
-TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
-YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
-PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGES.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-convey the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (C) <year> <name of author>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-
-Also add information on how to contact you by electronic and paper mail.
-
-If the program is interactive, make it output a short notice like this
-when it starts in an interactive mode:
-
- Gnomovision version 69, Copyright (C) year name of author
- Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, the commands you use may
-be called something other than `show w' and `show c'; they could even be
-mouse-clicks or menu items--whatever suits your program.
-
-You should also get your employer (if you work as a programmer) or your
-school, if any, to sign a "copyright disclaimer" for the program, if
-necessary. Here is a sample; alter the names:
-
- Yoyodyne, Inc., hereby disclaims all copyright interest in the program
- `Gnomovision' (which makes passes at compilers) written by James Hacker.
-
- <signature of Ty Coon>, 1 April 1989
- Ty Coon, President of Vice
-
-This General Public License does not permit incorporating your program into
-proprietary programs. If your program is a subroutine library, you may
-consider it more useful to permit linking proprietary applications with the
-library. If this is what you want to do, use the GNU Library General
-Public License instead of this License.
diff --git a/vendor/github.com/docker/docker/contrib/syntax/vim/LICENSE b/vendor/github.com/docker/docker/contrib/syntax/vim/LICENSE
deleted file mode 100644
index e67cdab..0000000
--- a/vendor/github.com/docker/docker/contrib/syntax/vim/LICENSE
+++ /dev/null
@@ -1,22 +0,0 @@
-Copyright (c) 2013 Honza Pokorny
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-1. Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
-2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
-ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/docker/docker/contrib/syscall-test/acct.c b/vendor/github.com/docker/docker/contrib/syscall-test/acct.c
new file mode 100644
index 0000000..88ac287
--- /dev/null
+++ b/vendor/github.com/docker/docker/contrib/syscall-test/acct.c
@@ -0,0 +1,16 @@
+#define _GNU_SOURCE
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+
+int main(int argc, char **argv)
+{
+	int err = acct("/tmp/t");
+	if (err == -1) {
+		fprintf(stderr, "acct failed: %s\n", strerror(errno));
+		exit(EXIT_FAILURE);
+	}
+	exit(EXIT_SUCCESS);
+}
diff --git a/vendor/github.com/docker/docker/contrib/syscall-test/exit32.s b/vendor/github.com/docker/docker/contrib/syscall-test/exit32.s
new file mode 100644
index 0000000..8bbb5c5
--- /dev/null
+++ b/vendor/github.com/docker/docker/contrib/syscall-test/exit32.s
@@ -0,0 +1,7 @@
+.globl _start
+.text
+_start:
+	xorl %eax, %eax
+	incl %eax
+	movb $0, %bl
+	int $0x80
diff --git a/vendor/github.com/docker/docker/contrib/syscall-test/ns.c b/vendor/github.com/docker/docker/contrib/syscall-test/ns.c
new file mode 100644
index 0000000..6243886
--- /dev/null
+++ b/vendor/github.com/docker/docker/contrib/syscall-test/ns.c
@@ -0,0 +1,63 @@
+#define _GNU_SOURCE
+#include <errno.h>
+#include <sched.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#define STACK_SIZE (1024 * 1024) /* Stack size for cloned child */
+
+struct clone_args {
+	char **argv;
+};
+
+// child_exec is the func that will be executed as the result of clone
+static int child_exec(void *stuff)
+{
+	struct clone_args *args = (struct clone_args *)stuff;
+	if (execvp(args->argv[0], args->argv) != 0) {
+		fprintf(stderr, "failed to execvp arguments %s\n",
+			strerror(errno));
+		exit(-1);
+	}
+	// we should never reach here!
+	exit(EXIT_FAILURE);
+}
+
+int main(int argc, char **argv)
+{
+	struct clone_args args;
+	args.argv = &argv[1];
+
+	int clone_flags = CLONE_NEWNS | CLONE_NEWPID | SIGCHLD;
+
+	// allocate stack for child
+	char *stack;		/* Start of stack buffer */
+	char *child_stack;	/* End of stack buffer */
+	stack =
+	    mmap(NULL, STACK_SIZE, PROT_READ | PROT_WRITE,
+		 MAP_SHARED | MAP_ANON | MAP_STACK, -1, 0);
+	if (stack == MAP_FAILED) {
+		fprintf(stderr, "mmap failed: %s\n", strerror(errno));
+		exit(EXIT_FAILURE);
+	}
+	child_stack = stack + STACK_SIZE;	/* Assume stack grows downward */
+
+	// the result of this call is that our child_exec will be run in another
+	// process returning its pid
+	pid_t pid = clone(child_exec, child_stack, clone_flags, &args);
+	if (pid < 0) {
+		fprintf(stderr, "clone failed: %s\n", strerror(errno));
+		exit(EXIT_FAILURE);
+	}
+	// lets wait on our child process here before we, the parent, exits
+	if (waitpid(pid, NULL, 0) == -1) {
+		fprintf(stderr, "failed to wait pid %d\n", pid);
+		exit(EXIT_FAILURE);
+	}
+	exit(EXIT_SUCCESS);
+}
diff --git a/vendor/github.com/docker/docker/contrib/syscall-test/raw.c b/vendor/github.com/docker/docker/contrib/syscall-test/raw.c
new file mode 100644
index 0000000..7995a0d
--- /dev/null
+++ b/vendor/github.com/docker/docker/contrib/syscall-test/raw.c
@@ -0,0 +1,14 @@
+#include <stdio.h>
+#include <unistd.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <netinet/udp.h>
+
+int main() {
+	if (socket(PF_INET, SOCK_RAW, IPPROTO_UDP) == -1) {
+		perror("socket");
+		return 1;
+	}
+
+	return 0;
+}
diff --git a/vendor/github.com/docker/docker/contrib/syscall-test/setgid.c b/vendor/github.com/docker/docker/contrib/syscall-test/setgid.c
new file mode 100644
index 0000000..df9680c
--- /dev/null
+++ b/vendor/github.com/docker/docker/contrib/syscall-test/setgid.c
@@ -0,0 +1,11 @@
+#include <sys/types.h>
+#include <unistd.h>
+#include <stdio.h>
+
+int main() {
+	if (setgid(1) == -1) {
+		perror("setgid");
+		return 1;
+	}
+	return 0;
+}
diff --git a/vendor/github.com/docker/docker/contrib/syscall-test/setuid.c b/vendor/github.com/docker/docker/contrib/syscall-test/setuid.c
new file mode 100644
index 0000000..5b93967
--- /dev/null
+++ b/vendor/github.com/docker/docker/contrib/syscall-test/setuid.c
@@ -0,0 +1,11 @@
+#include <sys/types.h>
+#include <unistd.h>
+#include <stdio.h>
+
+int main() {
+	if (setuid(1) == -1) {
+		perror("setuid");
+		return 1;
+	}
+	return 0;
+}
diff --git a/vendor/github.com/docker/docker/contrib/syscall-test/socket.c b/vendor/github.com/docker/docker/contrib/syscall-test/socket.c
new file mode 100644
index 0000000..d26c82f
--- /dev/null
+++ b/vendor/github.com/docker/docker/contrib/syscall-test/socket.c
@@ -0,0 +1,30 @@
+#include <stdio.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+
+int main() {
+	int s;
+	struct sockaddr_in sin;
+
+	s = socket(AF_INET, SOCK_STREAM, 0);
+	if (s == -1) {
+		perror("socket");
+		return 1;
+	}
+
+	sin.sin_family = AF_INET;
+	sin.sin_addr.s_addr = INADDR_ANY;
+	sin.sin_port = htons(80);
+
+	if (bind(s, (struct sockaddr *)&sin, sizeof(sin)) == -1) {
+		perror("bind");
+		return 1;
+	}
+
+	close(s);
+
+	return 0;
+}
diff --git a/vendor/github.com/docker/docker/contrib/syscall-test/userns.c b/vendor/github.com/docker/docker/contrib/syscall-test/userns.c
new file mode 100644
index 0000000..4c5c8d3
--- /dev/null
+++ b/vendor/github.com/docker/docker/contrib/syscall-test/userns.c
@@ -0,0 +1,63 @@
+#define _GNU_SOURCE
+#include <errno.h>
+#include <sched.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#define STACK_SIZE (1024 * 1024) /* Stack size for cloned child */
+
+struct clone_args {
+	char **argv;
+};
+
+// child_exec is
the func that will be executed as the result of clone +static int child_exec(void *stuff) +{ + struct clone_args *args = (struct clone_args *)stuff; + if (execvp(args->argv[0], args->argv) != 0) { + fprintf(stderr, "failed to execvp arguments %s\n", + strerror(errno)); + exit(-1); + } + // we should never reach here! + exit(EXIT_FAILURE); +} + +int main(int argc, char **argv) +{ + struct clone_args args; + args.argv = &argv[1]; + + int clone_flags = CLONE_NEWUSER | SIGCHLD; + + // allocate stack for child + char *stack; /* Start of stack buffer */ + char *child_stack; /* End of stack buffer */ + stack = + mmap(NULL, STACK_SIZE, PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_ANON | MAP_STACK, -1, 0); + if (stack == MAP_FAILED) { + fprintf(stderr, "mmap failed: %s\n", strerror(errno)); + exit(EXIT_FAILURE); + } + child_stack = stack + STACK_SIZE; /* Assume stack grows downward */ + + // the result of this call is that our child_exec will be run in another + // process returning its pid + pid_t pid = clone(child_exec, child_stack, clone_flags, &args); + if (pid < 0) { + fprintf(stderr, "clone failed: %s\n", strerror(errno)); + exit(EXIT_FAILURE); + } + // lets wait on our child process here before we, the parent, exits + if (waitpid(pid, NULL, 0) == -1) { + fprintf(stderr, "failed to wait pid %d\n", pid); + exit(EXIT_FAILURE); + } + exit(EXIT_SUCCESS); +} diff --git a/vendor/github.com/docker/docker/docs/static_files/contributors.png b/vendor/github.com/docker/docker/docs/static_files/contributors.png deleted file mode 100644 index 63c0a0c..0000000 Binary files a/vendor/github.com/docker/docker/docs/static_files/contributors.png and /dev/null differ diff --git a/vendor/github.com/docker/docker/hack/generate-authors.sh b/vendor/github.com/docker/docker/hack/generate-authors.sh deleted file mode 100755 index e78a97f..0000000 --- a/vendor/github.com/docker/docker/hack/generate-authors.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/bash -set -e - -cd "$(dirname "$(readlink -f "$BASH_SOURCE")")/.." - -# see also ".mailmap" for how email addresses and names are deduplicated - -{ - cat <<-'EOH' - # This file lists all individuals having contributed content to the repository. - # For how it is generated, see `hack/generate-authors.sh`. 
- EOH - echo - git log --format='%aN <%aE>' | LC_ALL=C.UTF-8 sort -uf -} > AUTHORS diff --git a/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.docker.default b/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.docker.default deleted file mode 120000 index 4278533..0000000 --- a/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.docker.default +++ /dev/null @@ -1 +0,0 @@ -../../../contrib/init/sysvinit-debian/docker.default \ No newline at end of file diff --git a/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.docker.init b/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.docker.init deleted file mode 120000 index 8cb89d3..0000000 --- a/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.docker.init +++ /dev/null @@ -1 +0,0 @@ -../../../contrib/init/sysvinit-debian/docker \ No newline at end of file diff --git a/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.docker.upstart b/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.docker.upstart deleted file mode 120000 index 7e1b64a..0000000 --- a/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.docker.upstart +++ /dev/null @@ -1 +0,0 @@ -../../../contrib/init/upstart/docker.conf \ No newline at end of file diff --git a/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.udev b/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.udev deleted file mode 120000 index 914a361..0000000 --- a/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.udev +++ /dev/null @@ -1 +0,0 @@ -../../../contrib/udev/80-docker.rules \ No newline at end of file diff --git a/vendor/github.com/docker/docker/pkg/README.md b/vendor/github.com/docker/docker/pkg/README.md new file mode 100644 index 0000000..755cd96 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/README.md @@ -0,0 +1,11 @@ +pkg/ is a collection of utility packages used by the Moby project without being specific to its internals. + +Utility packages are kept separate from the moby core codebase to keep it as small and concise as possible. +If some utilities grow larger and their APIs stabilize, they may be moved to their own repository under the +Moby organization, to facilitate re-use by other projects. However that is not the priority. + +The directory `pkg` is named after the same directory in the camlistore project. Since Brad is a core +Go maintainer, we thought it made sense to copy his methods for organizing Go code :) Thanks Brad! + +Because utility packages are small and neatly separated from the rest of the codebase, they are a good +place to start for aspiring maintainers and contributors. Get in touch if you want to help maintain them! diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go new file mode 100644 index 0000000..ee15ed5 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go @@ -0,0 +1,21 @@ +package homedir // import "github.com/docker/docker/pkg/homedir" + +import ( + "os" + + "github.com/docker/docker/pkg/idtools" +) + +// GetStatic returns the home directory for the current user without calling +// os/user.Current(). This is useful for static-linked binary on glibc-based +// system, because a call to os/user.Current() in a static binary leads to +// segfault due to a glibc issue that won't be fixed in a short term. 
+// (#29344, golang/go#13470, https://sourceware.org/bugzilla/show_bug.cgi?id=19341) +func GetStatic() (string, error) { + uid := os.Getuid() + usr, err := idtools.LookupUID(uid) + if err != nil { + return "", err + } + return usr.Home, nil +} diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go new file mode 100644 index 0000000..75ada2f --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go @@ -0,0 +1,13 @@ +// +build !linux + +package homedir // import "github.com/docker/docker/pkg/homedir" + +import ( + "errors" +) + +// GetStatic is not needed for non-linux systems. +// (Precisely, it is needed only for glibc-based linux systems.) +func GetStatic() (string, error) { + return "", errors.New("homedir.GetStatic() is not supported on this system") +} diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go new file mode 100644 index 0000000..d85e124 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go @@ -0,0 +1,34 @@ +// +build !windows + +package homedir // import "github.com/docker/docker/pkg/homedir" + +import ( + "os" + + "github.com/opencontainers/runc/libcontainer/user" +) + +// Key returns the env var name for the user's home dir based on +// the platform being run on +func Key() string { + return "HOME" +} + +// Get returns the home directory of the current user with the help of +// environment variables depending on the target operating system. +// Returned path should be used with "path/filepath" to form new paths. +func Get() string { + home := os.Getenv(Key()) + if home == "" { + if u, err := user.CurrentUser(); err == nil { + return u.Home + } + } + return home +} + +// GetShortcutString returns the string that is shortcut to user's home directory +// in the native shell of the platform running on. +func GetShortcutString() string { + return "~" +} diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go new file mode 100644 index 0000000..2f81813 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go @@ -0,0 +1,24 @@ +package homedir // import "github.com/docker/docker/pkg/homedir" + +import ( + "os" +) + +// Key returns the env var name for the user's home dir based on +// the platform being run on +func Key() string { + return "USERPROFILE" +} + +// Get returns the home directory of the current user with the help of +// environment variables depending on the target operating system. +// Returned path should be used with "path/filepath" to form new paths. +func Get() string { + return os.Getenv(Key()) +} + +// GetShortcutString returns the string that is shortcut to user's home directory +// in the native shell of the platform running on. +func GetShortcutString() string { + return "%USERPROFILE%" // be careful while using in format functions +} diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools.go b/vendor/github.com/docker/docker/pkg/idtools/idtools.go new file mode 100644 index 0000000..e2b4931 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/idtools/idtools.go @@ -0,0 +1,266 @@ +package idtools // import "github.com/docker/docker/pkg/idtools" + +import ( + "bufio" + "fmt" + "os" + "sort" + "strconv" + "strings" +) + +// IDMap contains a single entry for user namespace range remapping. 
An array +// of IDMap entries represents the structure that will be provided to the Linux +// kernel for creating a user namespace. +type IDMap struct { + ContainerID int `json:"container_id"` + HostID int `json:"host_id"` + Size int `json:"size"` +} + +type subIDRange struct { + Start int + Length int +} + +type ranges []subIDRange + +func (e ranges) Len() int { return len(e) } +func (e ranges) Swap(i, j int) { e[i], e[j] = e[j], e[i] } +func (e ranges) Less(i, j int) bool { return e[i].Start < e[j].Start } + +const ( + subuidFileName string = "/etc/subuid" + subgidFileName string = "/etc/subgid" +) + +// MkdirAllAndChown creates a directory (include any along the path) and then modifies +// ownership to the requested uid/gid. If the directory already exists, this +// function will still change ownership to the requested uid/gid pair. +func MkdirAllAndChown(path string, mode os.FileMode, owner IDPair) error { + return mkdirAs(path, mode, owner.UID, owner.GID, true, true) +} + +// MkdirAndChown creates a directory and then modifies ownership to the requested uid/gid. +// If the directory already exists, this function still changes ownership. +// Note that unlike os.Mkdir(), this function does not return IsExist error +// in case path already exists. +func MkdirAndChown(path string, mode os.FileMode, owner IDPair) error { + return mkdirAs(path, mode, owner.UID, owner.GID, false, true) +} + +// MkdirAllAndChownNew creates a directory (include any along the path) and then modifies +// ownership ONLY of newly created directories to the requested uid/gid. If the +// directories along the path exist, no change of ownership will be performed +func MkdirAllAndChownNew(path string, mode os.FileMode, owner IDPair) error { + return mkdirAs(path, mode, owner.UID, owner.GID, true, false) +} + +// GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps. +// If the maps are empty, then the root uid/gid will default to "real" 0/0 +func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) { + uid, err := toHost(0, uidMap) + if err != nil { + return -1, -1, err + } + gid, err := toHost(0, gidMap) + if err != nil { + return -1, -1, err + } + return uid, gid, nil +} + +// toContainer takes an id mapping, and uses it to translate a +// host ID to the remapped ID. If no map is provided, then the translation +// assumes a 1-to-1 mapping and returns the passed in id +func toContainer(hostID int, idMap []IDMap) (int, error) { + if idMap == nil { + return hostID, nil + } + for _, m := range idMap { + if (hostID >= m.HostID) && (hostID <= (m.HostID + m.Size - 1)) { + contID := m.ContainerID + (hostID - m.HostID) + return contID, nil + } + } + return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID) +} + +// toHost takes an id mapping and a remapped ID, and translates the +// ID to the mapped host ID. 
If no map is provided, then the translation +// assumes a 1-to-1 mapping and returns the passed in id # +func toHost(contID int, idMap []IDMap) (int, error) { + if idMap == nil { + return contID, nil + } + for _, m := range idMap { + if (contID >= m.ContainerID) && (contID <= (m.ContainerID + m.Size - 1)) { + hostID := m.HostID + (contID - m.ContainerID) + return hostID, nil + } + } + return -1, fmt.Errorf("Container ID %d cannot be mapped to a host ID", contID) +} + +// IDPair is a UID and GID pair +type IDPair struct { + UID int + GID int +} + +// IDMappings contains a mappings of UIDs and GIDs +type IDMappings struct { + uids []IDMap + gids []IDMap +} + +// NewIDMappings takes a requested user and group name and +// using the data from /etc/sub{uid,gid} ranges, creates the +// proper uid and gid remapping ranges for that user/group pair +func NewIDMappings(username, groupname string) (*IDMappings, error) { + subuidRanges, err := parseSubuid(username) + if err != nil { + return nil, err + } + subgidRanges, err := parseSubgid(groupname) + if err != nil { + return nil, err + } + if len(subuidRanges) == 0 { + return nil, fmt.Errorf("No subuid ranges found for user %q", username) + } + if len(subgidRanges) == 0 { + return nil, fmt.Errorf("No subgid ranges found for group %q", groupname) + } + + return &IDMappings{ + uids: createIDMap(subuidRanges), + gids: createIDMap(subgidRanges), + }, nil +} + +// NewIDMappingsFromMaps creates a new mapping from two slices +// Deprecated: this is a temporary shim while transitioning to IDMapping +func NewIDMappingsFromMaps(uids []IDMap, gids []IDMap) *IDMappings { + return &IDMappings{uids: uids, gids: gids} +} + +// RootPair returns a uid and gid pair for the root user. The error is ignored +// because a root user always exists, and the defaults are correct when the uid +// and gid maps are empty. +func (i *IDMappings) RootPair() IDPair { + uid, gid, _ := GetRootUIDGID(i.uids, i.gids) + return IDPair{UID: uid, GID: gid} +} + +// ToHost returns the host UID and GID for the container uid, gid. 
+// Remapping is only performed if the ids aren't already the remapped root ids +func (i *IDMappings) ToHost(pair IDPair) (IDPair, error) { + var err error + target := i.RootPair() + + if pair.UID != target.UID { + target.UID, err = toHost(pair.UID, i.uids) + if err != nil { + return target, err + } + } + + if pair.GID != target.GID { + target.GID, err = toHost(pair.GID, i.gids) + } + return target, err +} + +// ToContainer returns the container UID and GID for the host uid and gid +func (i *IDMappings) ToContainer(pair IDPair) (int, int, error) { + uid, err := toContainer(pair.UID, i.uids) + if err != nil { + return -1, -1, err + } + gid, err := toContainer(pair.GID, i.gids) + return uid, gid, err +} + +// Empty returns true if there are no id mappings +func (i *IDMappings) Empty() bool { + return len(i.uids) == 0 && len(i.gids) == 0 +} + +// UIDs return the UID mapping +// TODO: remove this once everything has been refactored to use pairs +func (i *IDMappings) UIDs() []IDMap { + return i.uids +} + +// GIDs return the UID mapping +// TODO: remove this once everything has been refactored to use pairs +func (i *IDMappings) GIDs() []IDMap { + return i.gids +} + +func createIDMap(subidRanges ranges) []IDMap { + idMap := []IDMap{} + + // sort the ranges by lowest ID first + sort.Sort(subidRanges) + containerID := 0 + for _, idrange := range subidRanges { + idMap = append(idMap, IDMap{ + ContainerID: containerID, + HostID: idrange.Start, + Size: idrange.Length, + }) + containerID = containerID + idrange.Length + } + return idMap +} + +func parseSubuid(username string) (ranges, error) { + return parseSubidFile(subuidFileName, username) +} + +func parseSubgid(username string) (ranges, error) { + return parseSubidFile(subgidFileName, username) +} + +// parseSubidFile will read the appropriate file (/etc/subuid or /etc/subgid) +// and return all found ranges for a specified username. 
If the special value +// "ALL" is supplied for username, then all ranges in the file will be returned +func parseSubidFile(path, username string) (ranges, error) { + var rangeList ranges + + subidFile, err := os.Open(path) + if err != nil { + return rangeList, err + } + defer subidFile.Close() + + s := bufio.NewScanner(subidFile) + for s.Scan() { + if err := s.Err(); err != nil { + return rangeList, err + } + + text := strings.TrimSpace(s.Text()) + if text == "" || strings.HasPrefix(text, "#") { + continue + } + parts := strings.Split(text, ":") + if len(parts) != 3 { + return rangeList, fmt.Errorf("Cannot parse subuid/gid information: Format not correct for %s file", path) + } + if parts[0] == username || username == "ALL" { + startid, err := strconv.Atoi(parts[1]) + if err != nil { + return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err) + } + length, err := strconv.Atoi(parts[2]) + if err != nil { + return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err) + } + rangeList = append(rangeList, subIDRange{startid, length}) + } + } + return rangeList, nil +} diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go new file mode 100644 index 0000000..1d87ea3 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go @@ -0,0 +1,230 @@ +// +build !windows + +package idtools // import "github.com/docker/docker/pkg/idtools" + +import ( + "bytes" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "sync" + "syscall" + + "github.com/docker/docker/pkg/system" + "github.com/opencontainers/runc/libcontainer/user" +) + +var ( + entOnce sync.Once + getentCmd string +) + +func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error { + // make an array containing the original path asked for, plus (for mkAll == true) + // all path components leading up to the complete path that don't exist before we MkdirAll + // so that we can chown all of them properly at the end. 
If chownExisting is false, we won't + // chown the full directory path if it exists + var paths []string + + stat, err := system.Stat(path) + if err == nil { + if !stat.IsDir() { + return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR} + } + if !chownExisting { + return nil + } + + // short-circuit--we were called with an existing directory and chown was requested + return lazyChown(path, ownerUID, ownerGID, stat) + } + + if os.IsNotExist(err) { + paths = []string{path} + } + + if mkAll { + // walk back to "/" looking for directories which do not exist + // and add them to the paths array for chown after creation + dirPath := path + for { + dirPath = filepath.Dir(dirPath) + if dirPath == "/" { + break + } + if _, err := os.Stat(dirPath); err != nil && os.IsNotExist(err) { + paths = append(paths, dirPath) + } + } + if err := system.MkdirAll(path, mode, ""); err != nil { + return err + } + } else { + if err := os.Mkdir(path, mode); err != nil && !os.IsExist(err) { + return err + } + } + // even if it existed, we will chown the requested path + any subpaths that + // didn't exist when we called MkdirAll + for _, pathComponent := range paths { + if err := lazyChown(pathComponent, ownerUID, ownerGID, nil); err != nil { + return err + } + } + return nil +} + +// CanAccess takes a valid (existing) directory and a uid, gid pair and determines +// if that uid, gid pair has access (execute bit) to the directory +func CanAccess(path string, pair IDPair) bool { + statInfo, err := system.Stat(path) + if err != nil { + return false + } + fileMode := os.FileMode(statInfo.Mode()) + permBits := fileMode.Perm() + return accessible(statInfo.UID() == uint32(pair.UID), + statInfo.GID() == uint32(pair.GID), permBits) +} + +func accessible(isOwner, isGroup bool, perms os.FileMode) bool { + if isOwner && (perms&0100 == 0100) { + return true + } + if isGroup && (perms&0010 == 0010) { + return true + } + if perms&0001 == 0001 { + return true + } + return false +} + +// LookupUser uses traditional local system files lookup (from libcontainer/user) on a username, +// followed by a call to `getent` for supporting host configured non-files passwd and group dbs +func LookupUser(username string) (user.User, error) { + // first try a local system files lookup using existing capabilities + usr, err := user.LookupUser(username) + if err == nil { + return usr, nil + } + // local files lookup failed; attempt to call `getent` to query configured passwd dbs + usr, err = getentUser(fmt.Sprintf("%s %s", "passwd", username)) + if err != nil { + return user.User{}, err + } + return usr, nil +} + +// LookupUID uses traditional local system files lookup (from libcontainer/user) on a uid, +// followed by a call to `getent` for supporting host configured non-files passwd and group dbs +func LookupUID(uid int) (user.User, error) { + // first try a local system files lookup using existing capabilities + usr, err := user.LookupUid(uid) + if err == nil { + return usr, nil + } + // local files lookup failed; attempt to call `getent` to query configured passwd dbs + return getentUser(fmt.Sprintf("%s %d", "passwd", uid)) +} + +func getentUser(args string) (user.User, error) { + reader, err := callGetent(args) + if err != nil { + return user.User{}, err + } + users, err := user.ParsePasswd(reader) + if err != nil { + return user.User{}, err + } + if len(users) == 0 { + return user.User{}, fmt.Errorf("getent failed to find passwd entry for %q", strings.Split(args, " ")[1]) + } + return users[0], nil +} + +// LookupGroup uses 
traditional local system files lookup (from libcontainer/user) on a group name, +// followed by a call to `getent` for supporting host configured non-files passwd and group dbs +func LookupGroup(groupname string) (user.Group, error) { + // first try a local system files lookup using existing capabilities + group, err := user.LookupGroup(groupname) + if err == nil { + return group, nil + } + // local files lookup failed; attempt to call `getent` to query configured group dbs + return getentGroup(fmt.Sprintf("%s %s", "group", groupname)) +} + +// LookupGID uses traditional local system files lookup (from libcontainer/user) on a group ID, +// followed by a call to `getent` for supporting host configured non-files passwd and group dbs +func LookupGID(gid int) (user.Group, error) { + // first try a local system files lookup using existing capabilities + group, err := user.LookupGid(gid) + if err == nil { + return group, nil + } + // local files lookup failed; attempt to call `getent` to query configured group dbs + return getentGroup(fmt.Sprintf("%s %d", "group", gid)) +} + +func getentGroup(args string) (user.Group, error) { + reader, err := callGetent(args) + if err != nil { + return user.Group{}, err + } + groups, err := user.ParseGroup(reader) + if err != nil { + return user.Group{}, err + } + if len(groups) == 0 { + return user.Group{}, fmt.Errorf("getent failed to find groups entry for %q", strings.Split(args, " ")[1]) + } + return groups[0], nil +} + +func callGetent(args string) (io.Reader, error) { + entOnce.Do(func() { getentCmd, _ = resolveBinary("getent") }) + // if no `getent` command on host, can't do anything else + if getentCmd == "" { + return nil, fmt.Errorf("") + } + out, err := execCmd(getentCmd, args) + if err != nil { + exitCode, errC := system.GetExitCode(err) + if errC != nil { + return nil, err + } + switch exitCode { + case 1: + return nil, fmt.Errorf("getent reported invalid parameters/database unknown") + case 2: + terms := strings.Split(args, " ") + return nil, fmt.Errorf("getent unable to find entry %q in %s database", terms[1], terms[0]) + case 3: + return nil, fmt.Errorf("getent database doesn't support enumeration") + default: + return nil, err + } + + } + return bytes.NewReader(out), nil +} + +// lazyChown performs a chown only if the uid/gid don't match what's requested +// Normally a Chown is a no-op if uid/gid match, but in some cases this can still cause an error, e.g. if the +// dir is on an NFS share, so don't call chown unless we absolutely must. +func lazyChown(p string, uid, gid int, stat *system.StatT) error { + if stat == nil { + var err error + stat, err = system.Stat(p) + if err != nil { + return err + } + } + if stat.UID() == uint32(uid) && stat.GID() == uint32(gid) { + return nil + } + return os.Chown(p, uid, gid) +} diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go new file mode 100644 index 0000000..d72cc28 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go @@ -0,0 +1,23 @@ +package idtools // import "github.com/docker/docker/pkg/idtools" + +import ( + "os" + + "github.com/docker/docker/pkg/system" +) + +// Platforms such as Windows do not support the UID/GID concept. So make this +// just a wrapper around system.MkdirAll. 
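(Aside: the ID-translation helpers vendored above — NewIDMappingsFromMaps, RootPair, ToContainer — compose as in the following minimal sketch. This is illustrative only and not part of the diff; the 100000/65536 range is an invented example of a typical /etc/subuid allocation. The vendored Windows variant of mkdirAs continues below.)

```go
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/idtools"
)

func main() {
	// One remapped range: container IDs 0-65535 map to host IDs
	// 100000-165535. The values are invented for illustration.
	uids := []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}
	gids := []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}
	m := idtools.NewIDMappingsFromMaps(uids, gids)

	// Container root (0/0) lands at the start of the host range.
	root := m.RootPair()
	fmt.Println(root.UID, root.GID) // 100000 100000

	// A host-side pair translates back into the container's view.
	uid, gid, err := m.ToContainer(idtools.IDPair{UID: 100005, GID: 100005})
	if err != nil {
		panic(err)
	}
	fmt.Println(uid, gid) // 5 5
}
```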
+func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error { + if err := system.MkdirAll(path, mode, ""); err != nil { + return err + } + return nil +} + +// CanAccess takes a valid (existing) directory and a uid, gid pair and determines +// if that uid, gid pair has access (execute bit) to the directory +// Windows does not require/support this function, so always return true +func CanAccess(path string, pair IDPair) bool { + return true +} diff --git a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go new file mode 100644 index 0000000..6272c5a --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go @@ -0,0 +1,164 @@ +package idtools // import "github.com/docker/docker/pkg/idtools" + +import ( + "fmt" + "regexp" + "sort" + "strconv" + "strings" + "sync" +) + +// add a user and/or group to Linux /etc/passwd, /etc/group using standard +// Linux distribution commands: +// adduser --system --shell /bin/false --disabled-login --disabled-password --no-create-home --group +// useradd -r -s /bin/false + +var ( + once sync.Once + userCommand string + + cmdTemplates = map[string]string{ + "adduser": "--system --shell /bin/false --no-create-home --disabled-login --disabled-password --group %s", + "useradd": "-r -s /bin/false %s", + "usermod": "-%s %d-%d %s", + } + + idOutRegexp = regexp.MustCompile(`uid=([0-9]+).*gid=([0-9]+)`) + // default length for a UID/GID subordinate range + defaultRangeLen = 65536 + defaultRangeStart = 100000 + userMod = "usermod" +) + +// AddNamespaceRangesUser takes a username and uses the standard system +// utility to create a system user/group pair used to hold the +// /etc/sub{uid,gid} ranges which will be used for user namespace +// mapping ranges in containers. 
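(Aside: before the function body below, here is a small self-contained sketch of the uid/gid extraction AddNamespaceRangesUser performs on `id` output using the idOutRegexp pattern defined above. The sample output string is fabricated for illustration:)

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same pattern as idOutRegexp in the vendored file; the `id` output
	// below is a fabricated sample.
	re := regexp.MustCompile(`uid=([0-9]+).*gid=([0-9]+)`)
	out := "uid=997(dockremap) gid=995(dockremap) groups=995(dockremap)"

	m := re.FindStringSubmatch(out)
	if len(m) != 3 {
		panic("unexpected id output")
	}
	fmt.Println(m[1], m[2]) // 997 995
}
```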
+func AddNamespaceRangesUser(name string) (int, int, error) { + if err := addUser(name); err != nil { + return -1, -1, fmt.Errorf("Error adding user %q: %v", name, err) + } + + // Query the system for the created uid and gid pair + out, err := execCmd("id", name) + if err != nil { + return -1, -1, fmt.Errorf("Error trying to find uid/gid for new user %q: %v", name, err) + } + matches := idOutRegexp.FindStringSubmatch(strings.TrimSpace(string(out))) + if len(matches) != 3 { + return -1, -1, fmt.Errorf("Can't find uid, gid from `id` output: %q", string(out)) + } + uid, err := strconv.Atoi(matches[1]) + if err != nil { + return -1, -1, fmt.Errorf("Can't convert found uid (%s) to int: %v", matches[1], err) + } + gid, err := strconv.Atoi(matches[2]) + if err != nil { + return -1, -1, fmt.Errorf("Can't convert found gid (%s) to int: %v", matches[2], err) + } + + // Now we need to create the subuid/subgid ranges for our new user/group (system users + // do not get auto-created ranges in subuid/subgid) + + if err := createSubordinateRanges(name); err != nil { + return -1, -1, fmt.Errorf("Couldn't create subordinate ID ranges: %v", err) + } + return uid, gid, nil +} + +func addUser(userName string) error { + once.Do(func() { + // set up which commands are used for adding users/groups dependent on distro + if _, err := resolveBinary("adduser"); err == nil { + userCommand = "adduser" + } else if _, err := resolveBinary("useradd"); err == nil { + userCommand = "useradd" + } + }) + if userCommand == "" { + return fmt.Errorf("Cannot add user; no useradd/adduser binary found") + } + args := fmt.Sprintf(cmdTemplates[userCommand], userName) + out, err := execCmd(userCommand, args) + if err != nil { + return fmt.Errorf("Failed to add user with error: %v; output: %q", err, string(out)) + } + return nil +} + +func createSubordinateRanges(name string) error { + + // first, we should verify that ranges weren't automatically created + // by the distro tooling + ranges, err := parseSubuid(name) + if err != nil { + return fmt.Errorf("Error while looking for subuid ranges for user %q: %v", name, err) + } + if len(ranges) == 0 { + // no UID ranges; let's create one + startID, err := findNextUIDRange() + if err != nil { + return fmt.Errorf("Can't find available subuid range: %v", err) + } + out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "v", startID, startID+defaultRangeLen-1, name)) + if err != nil { + return fmt.Errorf("Unable to add subuid range to user: %q; output: %s, err: %v", name, out, err) + } + } + + ranges, err = parseSubgid(name) + if err != nil { + return fmt.Errorf("Error while looking for subgid ranges for user %q: %v", name, err) + } + if len(ranges) == 0 { + // no GID ranges; let's create one + startID, err := findNextGIDRange() + if err != nil { + return fmt.Errorf("Can't find available subgid range: %v", err) + } + out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "w", startID, startID+defaultRangeLen-1, name)) + if err != nil { + return fmt.Errorf("Unable to add subgid range to user: %q; output: %s, err: %v", name, out, err) + } + } + return nil +} + +func findNextUIDRange() (int, error) { + ranges, err := parseSubuid("ALL") + if err != nil { + return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subuid file: %v", err) + } + sort.Sort(ranges) + return findNextRangeStart(ranges) +} + +func findNextGIDRange() (int, error) { + ranges, err := parseSubgid("ALL") + if err != nil { + return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subgid file: %v", err) + } + 
sort.Sort(ranges) + return findNextRangeStart(ranges) +} + +func findNextRangeStart(rangeList ranges) (int, error) { + startID := defaultRangeStart + for _, arange := range rangeList { + if wouldOverlap(arange, startID) { + startID = arange.Start + arange.Length + } + } + return startID, nil +} + +func wouldOverlap(arange subIDRange, ID int) bool { + low := ID + high := ID + defaultRangeLen + if (low >= arange.Start && low <= arange.Start+arange.Length) || + (high <= arange.Start+arange.Length && high >= arange.Start) { + return true + } + return false +} diff --git a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go new file mode 100644 index 0000000..e7c4d63 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go @@ -0,0 +1,12 @@ +// +build !linux + +package idtools // import "github.com/docker/docker/pkg/idtools" + +import "fmt" + +// AddNamespaceRangesUser takes a name and finds an unused uid, gid pair +// and calls the appropriate helper function to add the group and then +// the user to the group in /etc/group and /etc/passwd respectively. +func AddNamespaceRangesUser(name string) (int, int, error) { + return -1, -1, fmt.Errorf("No support for adding users or groups on this OS") +} diff --git a/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go b/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go new file mode 100644 index 0000000..903ac45 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go @@ -0,0 +1,32 @@ +// +build !windows + +package idtools // import "github.com/docker/docker/pkg/idtools" + +import ( + "fmt" + "os/exec" + "path/filepath" + "strings" +) + +func resolveBinary(binname string) (string, error) { + binaryPath, err := exec.LookPath(binname) + if err != nil { + return "", err + } + resolvedPath, err := filepath.EvalSymlinks(binaryPath) + if err != nil { + return "", err + } + //only return no error if the final resolved binary basename + //matches what was searched for + if filepath.Base(resolvedPath) == binname { + return resolvedPath, nil + } + return "", fmt.Errorf("Binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath) +} + +func execCmd(cmd, args string) ([]byte, error) { + execCmd := exec.Command(cmd, strings.Split(args, " ")...) 
+ return execCmd.CombinedOutput() +} diff --git a/vendor/github.com/docker/docker/pkg/mount/flags.go b/vendor/github.com/docker/docker/pkg/mount/flags.go new file mode 100644 index 0000000..272363b --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/mount/flags.go @@ -0,0 +1,149 @@ +package mount // import "github.com/docker/docker/pkg/mount" + +import ( + "fmt" + "strings" +) + +var flags = map[string]struct { + clear bool + flag int +}{ + "defaults": {false, 0}, + "ro": {false, RDONLY}, + "rw": {true, RDONLY}, + "suid": {true, NOSUID}, + "nosuid": {false, NOSUID}, + "dev": {true, NODEV}, + "nodev": {false, NODEV}, + "exec": {true, NOEXEC}, + "noexec": {false, NOEXEC}, + "sync": {false, SYNCHRONOUS}, + "async": {true, SYNCHRONOUS}, + "dirsync": {false, DIRSYNC}, + "remount": {false, REMOUNT}, + "mand": {false, MANDLOCK}, + "nomand": {true, MANDLOCK}, + "atime": {true, NOATIME}, + "noatime": {false, NOATIME}, + "diratime": {true, NODIRATIME}, + "nodiratime": {false, NODIRATIME}, + "bind": {false, BIND}, + "rbind": {false, RBIND}, + "unbindable": {false, UNBINDABLE}, + "runbindable": {false, RUNBINDABLE}, + "private": {false, PRIVATE}, + "rprivate": {false, RPRIVATE}, + "shared": {false, SHARED}, + "rshared": {false, RSHARED}, + "slave": {false, SLAVE}, + "rslave": {false, RSLAVE}, + "relatime": {false, RELATIME}, + "norelatime": {true, RELATIME}, + "strictatime": {false, STRICTATIME}, + "nostrictatime": {true, STRICTATIME}, +} + +var validFlags = map[string]bool{ + "": true, + "size": true, + "mode": true, + "uid": true, + "gid": true, + "nr_inodes": true, + "nr_blocks": true, + "mpol": true, +} + +var propagationFlags = map[string]bool{ + "bind": true, + "rbind": true, + "unbindable": true, + "runbindable": true, + "private": true, + "rprivate": true, + "shared": true, + "rshared": true, + "slave": true, + "rslave": true, +} + +// MergeTmpfsOptions merge mount options to make sure there is no duplicate. +func MergeTmpfsOptions(options []string) ([]string, error) { + // We use collisions maps to remove duplicates. + // For flag, the key is the flag value (the key for propagation flag is -1) + // For data=value, the key is the data + flagCollisions := map[int]bool{} + dataCollisions := map[string]bool{} + + var newOptions []string + // We process in reverse order + for i := len(options) - 1; i >= 0; i-- { + option := options[i] + if option == "defaults" { + continue + } + if f, ok := flags[option]; ok && f.flag != 0 { + // There is only one propagation mode + key := f.flag + if propagationFlags[option] { + key = -1 + } + // Check to see if there is collision for flag + if !flagCollisions[key] { + // We prepend the option and add to collision map + newOptions = append([]string{option}, newOptions...) + flagCollisions[key] = true + } + continue + } + opt := strings.SplitN(option, "=", 2) + if len(opt) != 2 || !validFlags[opt[0]] { + return nil, fmt.Errorf("Invalid tmpfs option %q", opt) + } + if !dataCollisions[opt[0]] { + // We prepend the option and add to collision map + newOptions = append([]string{option}, newOptions...) 
+			dataCollisions[opt[0]] = true
+		}
+	}
+
+	return newOptions, nil
+}
+
+// Parse fstab type mount options into mount() flags
+// and device specific data
+func parseOptions(options string) (int, string) {
+	var (
+		flag int
+		data []string
+	)
+
+	for _, o := range strings.Split(options, ",") {
+		// If the option does not exist in the flags table or the flag
+		// is not supported on the platform,
+		// then it is a data value for a specific fs type
+		if f, exists := flags[o]; exists && f.flag != 0 {
+			if f.clear {
+				flag &= ^f.flag
+			} else {
+				flag |= f.flag
+			}
+		} else {
+			data = append(data, o)
+		}
+	}
+	return flag, strings.Join(data, ",")
+}
+
+// ParseTmpfsOptions parses fstab type mount options into flags and data
+func ParseTmpfsOptions(options string) (int, string, error) {
+	flags, data := parseOptions(options)
+	for _, o := range strings.Split(data, ",") {
+		opt := strings.SplitN(o, "=", 2)
+		if !validFlags[opt[0]] {
+			return 0, "", fmt.Errorf("Invalid tmpfs option %q", opt)
+		}
+	}
+	return flags, data, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/mount/flags_freebsd.go b/vendor/github.com/docker/docker/pkg/mount/flags_freebsd.go
new file mode 100644
index 0000000..ef35ef9
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/flags_freebsd.go
@@ -0,0 +1,49 @@
+// +build freebsd,cgo
+
+package mount // import "github.com/docker/docker/pkg/mount"
+
+/*
+#include <sys/mount.h>
+*/
+import "C"
+
+const (
+	// RDONLY will mount the filesystem as read-only.
+	RDONLY = C.MNT_RDONLY
+
+	// NOSUID will not allow set-user-identifier or set-group-identifier bits to
+	// take effect.
+	NOSUID = C.MNT_NOSUID
+
+	// NOEXEC will not allow execution of any binaries on the mounted file system.
+	NOEXEC = C.MNT_NOEXEC
+
+	// SYNCHRONOUS will allow any I/O to the file system to be done synchronously.
+	SYNCHRONOUS = C.MNT_SYNCHRONOUS
+
+	// NOATIME will not update the file access time when reading from a file.
+	NOATIME = C.MNT_NOATIME
+)
+
+// These flags are unsupported.
+const (
+	BIND        = 0
+	DIRSYNC     = 0
+	MANDLOCK    = 0
+	NODEV       = 0
+	NODIRATIME  = 0
+	UNBINDABLE  = 0
+	RUNBINDABLE = 0
+	PRIVATE     = 0
+	RPRIVATE    = 0
+	SHARED      = 0
+	RSHARED     = 0
+	SLAVE       = 0
+	RSLAVE      = 0
+	RBIND       = 0
+	RELATIVE    = 0
+	RELATIME    = 0
+	REMOUNT     = 0
+	STRICTATIME = 0
+	mntDetach   = 0
+)
diff --git a/vendor/github.com/docker/docker/pkg/mount/flags_linux.go b/vendor/github.com/docker/docker/pkg/mount/flags_linux.go
new file mode 100644
index 0000000..a1b199a
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/flags_linux.go
@@ -0,0 +1,87 @@
+package mount // import "github.com/docker/docker/pkg/mount"
+
+import (
+	"golang.org/x/sys/unix"
+)
+
+const (
+	// RDONLY will mount the file system read-only.
+	RDONLY = unix.MS_RDONLY
+
+	// NOSUID will not allow set-user-identifier or set-group-identifier bits to
+	// take effect.
+	NOSUID = unix.MS_NOSUID
+
+	// NODEV will not interpret character or block special devices on the file
+	// system.
+	NODEV = unix.MS_NODEV
+
+	// NOEXEC will not allow execution of any binaries on the mounted file system.
+	NOEXEC = unix.MS_NOEXEC
+
+	// SYNCHRONOUS will allow I/O to the file system to be done synchronously.
+	SYNCHRONOUS = unix.MS_SYNCHRONOUS
+
+	// DIRSYNC will force all directory updates within the file system to be done
+	// synchronously. This affects the following system calls: create, link,
+	// unlink, symlink, mkdir, rmdir, mknod and rename.
+	DIRSYNC = unix.MS_DIRSYNC
+
+	// REMOUNT will attempt to remount an already-mounted file system.
This is + // commonly used to change the mount flags for a file system, especially to + // make a readonly file system writeable. It does not change device or mount + // point. + REMOUNT = unix.MS_REMOUNT + + // MANDLOCK will force mandatory locks on a filesystem. + MANDLOCK = unix.MS_MANDLOCK + + // NOATIME will not update the file access time when reading from a file. + NOATIME = unix.MS_NOATIME + + // NODIRATIME will not update the directory access time. + NODIRATIME = unix.MS_NODIRATIME + + // BIND remounts a subtree somewhere else. + BIND = unix.MS_BIND + + // RBIND remounts a subtree and all possible submounts somewhere else. + RBIND = unix.MS_BIND | unix.MS_REC + + // UNBINDABLE creates a mount which cannot be cloned through a bind operation. + UNBINDABLE = unix.MS_UNBINDABLE + + // RUNBINDABLE marks the entire mount tree as UNBINDABLE. + RUNBINDABLE = unix.MS_UNBINDABLE | unix.MS_REC + + // PRIVATE creates a mount which carries no propagation abilities. + PRIVATE = unix.MS_PRIVATE + + // RPRIVATE marks the entire mount tree as PRIVATE. + RPRIVATE = unix.MS_PRIVATE | unix.MS_REC + + // SLAVE creates a mount which receives propagation from its master, but not + // vice versa. + SLAVE = unix.MS_SLAVE + + // RSLAVE marks the entire mount tree as SLAVE. + RSLAVE = unix.MS_SLAVE | unix.MS_REC + + // SHARED creates a mount which provides the ability to create mirrors of + // that mount such that mounts and unmounts within any of the mirrors + // propagate to the other mirrors. + SHARED = unix.MS_SHARED + + // RSHARED marks the entire mount tree as SHARED. + RSHARED = unix.MS_SHARED | unix.MS_REC + + // RELATIME updates inode access times relative to modify or change time. + RELATIME = unix.MS_RELATIME + + // STRICTATIME allows to explicitly request full atime updates. This makes + // it possible for the kernel to default to relatime or noatime but still + // allow userspace to override it. + STRICTATIME = unix.MS_STRICTATIME + + mntDetach = unix.MNT_DETACH +) diff --git a/vendor/github.com/docker/docker/pkg/mount/flags_unsupported.go b/vendor/github.com/docker/docker/pkg/mount/flags_unsupported.go new file mode 100644 index 0000000..cc6c475 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/mount/flags_unsupported.go @@ -0,0 +1,31 @@ +// +build !linux,!freebsd freebsd,!cgo + +package mount // import "github.com/docker/docker/pkg/mount" + +// These flags are unsupported. +const ( + BIND = 0 + DIRSYNC = 0 + MANDLOCK = 0 + NOATIME = 0 + NODEV = 0 + NODIRATIME = 0 + NOEXEC = 0 + NOSUID = 0 + UNBINDABLE = 0 + RUNBINDABLE = 0 + PRIVATE = 0 + RPRIVATE = 0 + SHARED = 0 + RSHARED = 0 + SLAVE = 0 + RSLAVE = 0 + RBIND = 0 + RELATIME = 0 + RELATIVE = 0 + REMOUNT = 0 + STRICTATIME = 0 + SYNCHRONOUS = 0 + RDONLY = 0 + mntDetach = 0 +) diff --git a/vendor/github.com/docker/docker/pkg/mount/mount.go b/vendor/github.com/docker/docker/pkg/mount/mount.go new file mode 100644 index 0000000..874aff6 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/mount/mount.go @@ -0,0 +1,141 @@ +package mount // import "github.com/docker/docker/pkg/mount" + +import ( + "sort" + "strings" + "syscall" + + "github.com/sirupsen/logrus" +) + +// FilterFunc is a type defining a callback function +// to filter out unwanted entries. 
It takes a pointer
+// to an Info struct (not fully populated, currently
+// only Mountpoint is filled in), and returns two booleans:
+// - skip: true if the entry should be skipped
+// - stop: true if parsing should be stopped after the entry
+type FilterFunc func(*Info) (skip, stop bool)
+
+// PrefixFilter discards all entries whose mount points
+// do not start with the specified prefix
+func PrefixFilter(prefix string) FilterFunc {
+	return func(m *Info) (bool, bool) {
+		skip := !strings.HasPrefix(m.Mountpoint, prefix)
+		return skip, false
+	}
+}
+
+// SingleEntryFilter looks for a specific entry
+func SingleEntryFilter(mp string) FilterFunc {
+	return func(m *Info) (bool, bool) {
+		if m.Mountpoint == mp {
+			return false, true // don't skip, stop now
+		}
+		return true, false // skip, keep going
+	}
+}
+
+// ParentsFilter returns all entries whose mount points
+// can be parents of the specified path, discarding others.
+// For example, given `/var/lib/docker/something`, entries
+// like `/var/lib/docker`, `/var` and `/` are returned.
+func ParentsFilter(path string) FilterFunc {
+	return func(m *Info) (bool, bool) {
+		skip := !strings.HasPrefix(path, m.Mountpoint)
+		return skip, false
+	}
+}
+
+// GetMounts retrieves a list of mounts for the current running process,
+// with an optional filter applied (use nil for no filter).
+func GetMounts(f FilterFunc) ([]*Info, error) {
+	return parseMountTable(f)
+}
+
+// Mounted determines if a specified mountpoint has been mounted.
+// On Linux it looks at /proc/self/mountinfo.
+func Mounted(mountpoint string) (bool, error) {
+	entries, err := GetMounts(SingleEntryFilter(mountpoint))
+	if err != nil {
+		return false, err
+	}
+
+	return len(entries) > 0, nil
+}
+
+// Mount will mount filesystem according to the specified configuration, on the
+// condition that the target path is *not* already mounted. Options must be
+// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See
+// flags.go for supported option flags.
+func Mount(device, target, mType, options string) error {
+	flag, _ := parseOptions(options)
+	if flag&REMOUNT != REMOUNT {
+		if mounted, err := Mounted(target); err != nil || mounted {
+			return err
+		}
+	}
+	return ForceMount(device, target, mType, options)
+}
+
+// ForceMount will mount a filesystem according to the specified configuration,
+// *regardless* if the target path is not already mounted. Options must be
+// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See
+// flags.go for supported option flags.
+func ForceMount(device, target, mType, options string) error {
+	flag, data := parseOptions(options)
+	return mount(device, target, mType, uintptr(flag), data)
+}
+
+// Unmount lazily unmounts a filesystem on supported platforms, otherwise
+// does a normal unmount.
+func Unmount(target string) error {
+	err := unmount(target, mntDetach)
+	if err == syscall.EINVAL {
+		// ignore "not mounted" error
+		err = nil
+	}
+	return err
+}
+
+// RecursiveUnmount unmounts the target and all mounts underneath, starting with
+// the deepest mount first.
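(Aside: before RecursiveUnmount's body below, a minimal, hypothetical usage sketch of the filter API defined above. Linux-only, since the table is read from /proc/self/mountinfo, and the paths are invented for illustration:)

```go
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/mount"
)

func main() {
	// List only mounts under /sys; PrefixFilter skips everything else.
	infos, err := mount.GetMounts(mount.PrefixFilter("/sys"))
	if err != nil {
		panic(err)
	}
	for _, info := range infos {
		fmt.Println(info.Mountpoint, info.Fstype)
	}

	// Mounted uses SingleEntryFilter under the hood, which stops the
	// scan at the first matching entry.
	mounted, err := mount.Mounted("/proc")
	fmt.Println(mounted, err)
}
```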
+func RecursiveUnmount(target string) error {
+	mounts, err := parseMountTable(PrefixFilter(target))
+	if err != nil {
+		return err
+	}
+
+	// Make the deepest mount be first
+	sort.Slice(mounts, func(i, j int) bool {
+		return len(mounts[i].Mountpoint) > len(mounts[j].Mountpoint)
+	})
+
+	for i, m := range mounts {
+		logrus.Debugf("Trying to unmount %s", m.Mountpoint)
+		err = unmount(m.Mountpoint, mntDetach)
+		if err != nil {
+			// If the error is EINVAL either this whole package is wrong (invalid flags passed to unmount(2)) or this is
+			// not a mountpoint (which is ok in this case).
+			// Meanwhile calling `Mounted()` is very expensive.
+			//
+			// We've purposefully used `syscall.EINVAL` here instead of `unix.EINVAL` to avoid platform branching
+			// Since `EINVAL` is defined for both Windows and Linux in the `syscall` package (and other platforms),
+			// this is nicer than defining a custom value that we can refer to in each platform file.
+			if err == syscall.EINVAL {
+				continue
+			}
+			if i == len(mounts)-1 {
+				if mounted, e := Mounted(m.Mountpoint); e != nil || mounted {
+					return err
+				}
+				continue
+			}
+			// This is some submount, we can ignore this error for now, the final unmount will fail if this is a real problem
+			logrus.WithError(err).Warnf("Failed to unmount submount %s", m.Mountpoint)
+			continue
+		}
+
+		logrus.Debugf("Unmounted %s", m.Mountpoint)
+	}
+	return nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go b/vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go
new file mode 100644
index 0000000..b6ab83a
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go
@@ -0,0 +1,60 @@
+package mount // import "github.com/docker/docker/pkg/mount"
+
+/*
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/_iovec.h>
+#include <sys/mount.h>
+#include <sys/param.h>
+*/
+import "C"
+
+import (
+	"fmt"
+	"strings"
+	"unsafe"
+
+	"golang.org/x/sys/unix"
+)
+
+func allocateIOVecs(options []string) []C.struct_iovec {
+	out := make([]C.struct_iovec, len(options))
+	for i, option := range options {
+		out[i].iov_base = unsafe.Pointer(C.CString(option))
+		out[i].iov_len = C.size_t(len(option) + 1)
+	}
+	return out
+}
+
+func mount(device, target, mType string, flag uintptr, data string) error {
+	isNullFS := false
+
+	xs := strings.Split(data, ",")
+	for _, x := range xs {
+		if x == "bind" {
+			isNullFS = true
+		}
+	}
+
+	options := []string{"fspath", target}
+	if isNullFS {
+		options = append(options, "fstype", "nullfs", "target", device)
+	} else {
+		options = append(options, "fstype", mType, "from", device)
+	}
+	rawOptions := allocateIOVecs(options)
+	for _, rawOption := range rawOptions {
+		defer C.free(rawOption.iov_base)
+	}
+
+	if errno := C.nmount(&rawOptions[0], C.uint(len(options)), C.int(flag)); errno != 0 {
+		reason := C.GoString(C.strerror(*C.__error()))
+		return fmt.Errorf("Failed to call nmount: %s", reason)
+	}
+	return nil
+}
+
+func unmount(target string, flag int) error {
+	return unix.Unmount(target, flag)
+}
diff --git a/vendor/github.com/docker/docker/pkg/mount/mounter_linux.go b/vendor/github.com/docker/docker/pkg/mount/mounter_linux.go
new file mode 100644
index 0000000..631daf1
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/mounter_linux.go
@@ -0,0 +1,57 @@
+package mount // import "github.com/docker/docker/pkg/mount"
+
+import (
+	"golang.org/x/sys/unix"
+)
+
+const (
+	// ptypes is the set of propagation types.
+ ptypes = unix.MS_SHARED | unix.MS_PRIVATE | unix.MS_SLAVE | unix.MS_UNBINDABLE + + // pflags is the full set valid flags for a change propagation call. + pflags = ptypes | unix.MS_REC | unix.MS_SILENT + + // broflags is the combination of bind and read only + broflags = unix.MS_BIND | unix.MS_RDONLY +) + +// isremount returns true if either device name or flags identify a remount request, false otherwise. +func isremount(device string, flags uintptr) bool { + switch { + // We treat device "" and "none" as a remount request to provide compatibility with + // requests that don't explicitly set MS_REMOUNT such as those manipulating bind mounts. + case flags&unix.MS_REMOUNT != 0, device == "", device == "none": + return true + default: + return false + } +} + +func mount(device, target, mType string, flags uintptr, data string) error { + oflags := flags &^ ptypes + if !isremount(device, flags) || data != "" { + // Initial call applying all non-propagation flags for mount + // or remount with changed data + if err := unix.Mount(device, target, mType, oflags, data); err != nil { + return err + } + } + + if flags&ptypes != 0 { + // Change the propagation type. + if err := unix.Mount("", target, "", flags&pflags, ""); err != nil { + return err + } + } + + if oflags&broflags == broflags { + // Remount the bind to apply read only. + return unix.Mount("", target, "", oflags|unix.MS_REMOUNT, "") + } + + return nil +} + +func unmount(target string, flag int) error { + return unix.Unmount(target, flag) +} diff --git a/vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go b/vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go new file mode 100644 index 0000000..1428dff --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go @@ -0,0 +1,11 @@ +// +build !linux,!freebsd freebsd,!cgo + +package mount // import "github.com/docker/docker/pkg/mount" + +func mount(device, target, mType string, flag uintptr, data string) error { + panic("Not implemented") +} + +func unmount(target string, flag int) error { + panic("Not implemented") +} diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo.go new file mode 100644 index 0000000..ecd03fc --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/mount/mountinfo.go @@ -0,0 +1,40 @@ +package mount // import "github.com/docker/docker/pkg/mount" + +// Info reveals information about a particular mounted filesystem. This +// struct is populated from the content in the /proc//mountinfo file. +type Info struct { + // ID is a unique identifier of the mount (may be reused after umount). + ID int + + // Parent indicates the ID of the mount parent (or of self for the top of the + // mount tree). + Parent int + + // Major indicates one half of the device ID which identifies the device class. + Major int + + // Minor indicates one half of the device ID which identifies a specific + // instance of device. + Minor int + + // Root of the mount within the filesystem. + Root string + + // Mountpoint indicates the mount point relative to the process's root. + Mountpoint string + + // Opts represents mount-specific options. + Opts string + + // Optional represents optional fields. + Optional string + + // Fstype indicates the type of filesystem, such as EXT3. + Fstype string + + // Source indicates filesystem specific information or "none". + Source string + + // VfsOpts represents per super block options. 
+	VfsOpts string
+}
diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go
new file mode 100644
index 0000000..36c89dc
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go
@@ -0,0 +1,55 @@
+package mount // import "github.com/docker/docker/pkg/mount"
+
+/*
+#include <sys/param.h>
+#include <sys/ucred.h>
+#include <sys/mount.h>
+*/
+import "C"
+
+import (
+	"fmt"
+	"reflect"
+	"unsafe"
+)
+
+// Parse /proc/self/mountinfo because comparing Dev and ino does not work from
+// bind mounts.
+func parseMountTable(filter FilterFunc) ([]*Info, error) {
+	var rawEntries *C.struct_statfs
+
+	count := int(C.getmntinfo(&rawEntries, C.MNT_WAIT))
+	if count == 0 {
+		return nil, fmt.Errorf("Failed to call getmntinfo")
+	}
+
+	var entries []C.struct_statfs
+	header := (*reflect.SliceHeader)(unsafe.Pointer(&entries))
+	header.Cap = count
+	header.Len = count
+	header.Data = uintptr(unsafe.Pointer(rawEntries))
+
+	var out []*Info
+	for _, entry := range entries {
+		var mountinfo Info
+		var skip, stop bool
+		mountinfo.Mountpoint = C.GoString(&entry.f_mntonname[0])
+
+		if filter != nil {
+			// filter out entries we're not interested in
+			skip, stop = filter(&mountinfo)
+			if skip {
+				continue
+			}
+		}
+
+		mountinfo.Source = C.GoString(&entry.f_mntfromname[0])
+		mountinfo.Fstype = C.GoString(&entry.f_fstypename[0])
+
+		out = append(out, &mountinfo)
+		if stop {
+			break
+		}
+	}
+	return out, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go
new file mode 100644
index 0000000..c1dba01
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go
@@ -0,0 +1,132 @@
+package mount // import "github.com/docker/docker/pkg/mount"
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"os"
+	"strconv"
+	"strings"
+)
+
+func parseInfoFile(r io.Reader, filter FilterFunc) ([]*Info, error) {
+	s := bufio.NewScanner(r)
+	out := []*Info{}
+	for s.Scan() {
+		if err := s.Err(); err != nil {
+			return nil, err
+		}
+		/*
+		   36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue
+		   (1)(2)(3)   (4)   (5)      (6)      (7)   (8) (9)   (10)         (11)
+
+		   (1) mount ID: unique identifier of the mount (may be reused after umount)
+		   (2) parent ID: ID of parent (or of self for the top of the mount tree)
+		   (3) major:minor: value of st_dev for files on filesystem
+		   (4) root: root of the mount within the filesystem
+		   (5) mount point: mount point relative to the process's root
+		   (6) mount options: per mount options
+		   (7) optional fields: zero or more fields of the form "tag[:value]"
+		   (8) separator: marks the end of the optional fields
+		   (9) filesystem type: name of filesystem of the form "type[.subtype]"
+		   (10) mount source: filesystem specific information or "none"
+		   (11) super options: per super block options
+		*/
+
+		text := s.Text()
+		fields := strings.Split(text, " ")
+		numFields := len(fields)
+		if numFields < 10 {
+			// should be at least 10 fields
+			return nil, fmt.Errorf("Parsing '%s' failed: not enough fields (%d)", text, numFields)
+		}
+
+		p := &Info{}
+		// ignore any numbers parsing errors, as there should not be any
+		p.ID, _ = strconv.Atoi(fields[0])
+		p.Parent, _ = strconv.Atoi(fields[1])
+		mm := strings.Split(fields[2], ":")
+		if len(mm) != 2 {
+			return nil, fmt.Errorf("Parsing '%s' failed: unexpected minor:major pair %s", text, mm)
+		}
+		p.Major, _ = strconv.Atoi(mm[0])
+		p.Minor, _ = strconv.Atoi(mm[1])
+
+		p.Root = fields[3]
+		p.Mountpoint = fields[4]
+		p.Opts = fields[5]
+
+		var skip, stop bool
+		if filter != nil {
+			// filter out entries we're not interested in
+			skip, stop = filter(p)
+			if skip {
+				continue
+			}
+		}
+
+		// one or more optional fields, until a separator (-)
+		i := 6
+		for ; i < numFields && fields[i] != "-"; i++ {
+			switch i {
+			case 6:
+				p.Optional = fields[6]
+			default:
+				/* NOTE there might be more optional fields before the separator, such as
+				   fields[7]...fields[N] (where N < sepIndex), although
+				   as of Linux kernel 4.15 the only known ones are
+				   mount propagation flags in fields[6]. The correct
+				   behavior is to ignore any unknown optional fields.
+				*/
+				break
+			}
+		}
+		if i == numFields {
+			return nil, fmt.Errorf("Parsing '%s' failed: missing separator ('-')", text)
+		}
+
+		// There should be 3 fields after the separator...
+		if i+4 > numFields {
+			return nil, fmt.Errorf("Parsing '%s' failed: not enough fields after a separator", text)
+		}
+		// ... but in Linux <= 3.9 mounting a cifs with spaces in a share name
+		// (like "//serv/My Documents") _may_ end up having a space in the last field
+		// of mountinfo (like "unc=//serv/My Documents"). Since kernel 3.10-rc1, cifs
+		// option unc= is ignored, so a space should not appear. In here we ignore
+		// those "extra" fields caused by extra spaces.
+		p.Fstype = fields[i+1]
+		p.Source = fields[i+2]
+		p.VfsOpts = fields[i+3]
+
+		out = append(out, p)
+		if stop {
+			break
+		}
+	}
+	return out, nil
+}
+
+// Parse /proc/self/mountinfo because comparing Dev and ino does not work from
+// bind mounts
+func parseMountTable(filter FilterFunc) ([]*Info, error) {
+	f, err := os.Open("/proc/self/mountinfo")
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	return parseInfoFile(f, filter)
+}
+
+// PidMountInfo collects the mounts for a specific process ID. If the process
+// ID is unknown, it is better to use `GetMounts` which will inspect
+// "/proc/self/mountinfo" instead.
+func PidMountInfo(pid int) ([]*Info, error) {
+	f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid))
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	return parseInfoFile(f, nil)
+}
diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go
new file mode 100644
index 0000000..fd16d3e
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go
@@ -0,0 +1,12 @@
+// +build !windows,!linux,!freebsd freebsd,!cgo
+
+package mount // import "github.com/docker/docker/pkg/mount"
+
+import (
+	"fmt"
+	"runtime"
+)
+
+func parseMountTable(f FilterFunc) ([]*Info, error) {
+	return nil, fmt.Errorf("mount.parseMountTable is not implemented on %s/%s", runtime.GOOS, runtime.GOARCH)
+}
diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go
new file mode 100644
index 0000000..27e0f69
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go
@@ -0,0 +1,6 @@
+package mount // import "github.com/docker/docker/pkg/mount"
+
+func parseMountTable(f FilterFunc) ([]*Info, error) {
+	// Do NOT return an error!
+ return nil, nil +} diff --git a/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go b/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go new file mode 100644 index 0000000..538f663 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go @@ -0,0 +1,67 @@ +package mount // import "github.com/docker/docker/pkg/mount" + +// MakeShared ensures a mounted filesystem has the SHARED mount option enabled. +// See the supported options in flags.go for further reference. +func MakeShared(mountPoint string) error { + return ensureMountedAs(mountPoint, "shared") +} + +// MakeRShared ensures a mounted filesystem has the RSHARED mount option enabled. +// See the supported options in flags.go for further reference. +func MakeRShared(mountPoint string) error { + return ensureMountedAs(mountPoint, "rshared") +} + +// MakePrivate ensures a mounted filesystem has the PRIVATE mount option enabled. +// See the supported options in flags.go for further reference. +func MakePrivate(mountPoint string) error { + return ensureMountedAs(mountPoint, "private") +} + +// MakeRPrivate ensures a mounted filesystem has the RPRIVATE mount option +// enabled. See the supported options in flags.go for further reference. +func MakeRPrivate(mountPoint string) error { + return ensureMountedAs(mountPoint, "rprivate") +} + +// MakeSlave ensures a mounted filesystem has the SLAVE mount option enabled. +// See the supported options in flags.go for further reference. +func MakeSlave(mountPoint string) error { + return ensureMountedAs(mountPoint, "slave") +} + +// MakeRSlave ensures a mounted filesystem has the RSLAVE mount option enabled. +// See the supported options in flags.go for further reference. +func MakeRSlave(mountPoint string) error { + return ensureMountedAs(mountPoint, "rslave") +} + +// MakeUnbindable ensures a mounted filesystem has the UNBINDABLE mount option +// enabled. See the supported options in flags.go for further reference. +func MakeUnbindable(mountPoint string) error { + return ensureMountedAs(mountPoint, "unbindable") +} + +// MakeRUnbindable ensures a mounted filesystem has the RUNBINDABLE mount +// option enabled. See the supported options in flags.go for further reference. 
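(Aside: a hypothetical usage sketch for these shared-subtree helpers. Linux-only and requires privileges to mount; the path is invented. ensureMountedAs, defined just below, first bind-mounts the path onto itself if it is not already a mount point:)

```go
package main

import (
	"log"

	"github.com/docker/docker/pkg/mount"
)

func main() {
	// Invented path; mark the whole subtree shared so mounts created
	// under it propagate to any mirrors (bind mounts) of the subtree.
	if err := mount.MakeRShared("/var/lib/docker"); err != nil {
		log.Fatal(err)
	}
}
```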
+func MakeRUnbindable(mountPoint string) error { + return ensureMountedAs(mountPoint, "runbindable") +} + +func ensureMountedAs(mountPoint, options string) error { + mounted, err := Mounted(mountPoint) + if err != nil { + return err + } + + if !mounted { + if err := Mount(mountPoint, mountPoint, "none", "bind,rw"); err != nil { + return err + } + } + if _, err = Mounted(mountPoint); err != nil { + return err + } + + return ForceMount("", mountPoint, "none", options) +} diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes.go b/vendor/github.com/docker/docker/pkg/system/chtimes.go new file mode 100644 index 0000000..c26a4e2 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/chtimes.go @@ -0,0 +1,31 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "os" + "time" +) + +// Chtimes changes the access time and modified time of a file at the given path +func Chtimes(name string, atime time.Time, mtime time.Time) error { + unixMinTime := time.Unix(0, 0) + unixMaxTime := maxTime + + // If the modified time is prior to the Unix Epoch, or after the + // end of Unix Time, os.Chtimes has undefined behavior + // default to Unix Epoch in this case, just in case + + if atime.Before(unixMinTime) || atime.After(unixMaxTime) { + atime = unixMinTime + } + + if mtime.Before(unixMinTime) || mtime.After(unixMaxTime) { + mtime = unixMinTime + } + + if err := os.Chtimes(name, atime, mtime); err != nil { + return err + } + + // Take platform specific action for setting create time. + return setCTime(name, mtime) +} diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes_unix.go b/vendor/github.com/docker/docker/pkg/system/chtimes_unix.go new file mode 100644 index 0000000..259138a --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/chtimes_unix.go @@ -0,0 +1,14 @@ +// +build !windows + +package system // import "github.com/docker/docker/pkg/system" + +import ( + "time" +) + +//setCTime will set the create time on a file. On Unix, the create +//time is updated as a side effect of setting the modified time, so +//no action is required. +func setCTime(path string, ctime time.Time) error { + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go b/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go new file mode 100644 index 0000000..d3a115f --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go @@ -0,0 +1,26 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "time" + + "golang.org/x/sys/windows" +) + +//setCTime will set the create time on a file. On Windows, this requires +//calling SetFileTime and explicitly including the create time. 
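(Aside: to make the clamping in Chtimes above concrete, here is a self-contained sketch. Note that maxTime is defined elsewhere in the vendored package and is platform-dependent; the 64-bit nanosecond bound below is a stand-in for illustration only:)

```go
package main

import (
	"fmt"
	"math"
	"time"
)

func main() {
	// Mirrors the bounds check in Chtimes: timestamps outside the
	// representable range fall back to the Unix epoch.
	unixMinTime := time.Unix(0, 0)
	maxTime := time.Unix(0, math.MaxInt64) // stand-in for the package's maxTime

	mtime := time.Unix(-12345, 0) // a pre-epoch timestamp
	if mtime.Before(unixMinTime) || mtime.After(maxTime) {
		mtime = unixMinTime
	}
	fmt.Println(mtime.Unix()) // 0
}
```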
+func setCTime(path string, ctime time.Time) error { + ctimespec := windows.NsecToTimespec(ctime.UnixNano()) + pathp, e := windows.UTF16PtrFromString(path) + if e != nil { + return e + } + h, e := windows.CreateFile(pathp, + windows.FILE_WRITE_ATTRIBUTES, windows.FILE_SHARE_WRITE, nil, + windows.OPEN_EXISTING, windows.FILE_FLAG_BACKUP_SEMANTICS, 0) + if e != nil { + return e + } + defer windows.Close(h) + c := windows.NsecToFiletime(windows.TimespecToNsec(ctimespec)) + return windows.SetFileTime(h, &c, nil, nil) +} diff --git a/vendor/github.com/docker/docker/pkg/system/errors.go b/vendor/github.com/docker/docker/pkg/system/errors.go new file mode 100644 index 0000000..2573d71 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/errors.go @@ -0,0 +1,13 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "errors" +) + +var ( + // ErrNotSupportedPlatform means the platform is not supported. + ErrNotSupportedPlatform = errors.New("platform and architecture is not supported") + + // ErrNotSupportedOperatingSystem means the operating system is not supported. + ErrNotSupportedOperatingSystem = errors.New("operating system is not supported") +) diff --git a/vendor/github.com/docker/docker/pkg/system/exitcode.go b/vendor/github.com/docker/docker/pkg/system/exitcode.go new file mode 100644 index 0000000..4ba8fe3 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/exitcode.go @@ -0,0 +1,19 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "fmt" + "os/exec" + "syscall" +) + +// GetExitCode returns the ExitStatus of the specified error if its type is +// exec.ExitError, returns 0 and an error otherwise. +func GetExitCode(err error) (int, error) { + exitCode := 0 + if exiterr, ok := err.(*exec.ExitError); ok { + if procExit, ok := exiterr.Sys().(syscall.WaitStatus); ok { + return procExit.ExitStatus(), nil + } + } + return exitCode, fmt.Errorf("failed to get exit code") +} diff --git a/vendor/github.com/docker/docker/pkg/system/filesys.go b/vendor/github.com/docker/docker/pkg/system/filesys.go new file mode 100644 index 0000000..adeb163 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/filesys.go @@ -0,0 +1,67 @@ +// +build !windows + +package system // import "github.com/docker/docker/pkg/system" + +import ( + "io/ioutil" + "os" + "path/filepath" +) + +// MkdirAllWithACL is a wrapper for MkdirAll on unix systems. +func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error { + return MkdirAll(path, perm, sddl) +} + +// MkdirAll creates a directory named path along with any necessary parents, +// with permission specified by attribute perm for all dir created. +func MkdirAll(path string, perm os.FileMode, sddl string) error { + return os.MkdirAll(path, perm) +} + +// IsAbs is a platform-specific wrapper for filepath.IsAbs. +func IsAbs(path string) bool { + return filepath.IsAbs(path) +} + +// The functions below here are wrappers for the equivalents in the os and ioutils packages. +// They are passthrough on Unix platforms, and only relevant on Windows. + +// CreateSequential creates the named file with mode 0666 (before umask), truncating +// it if it already exists. If successful, methods on the returned +// File can be used for I/O; the associated file descriptor has mode +// O_RDWR. +// If there is an error, it will be of type *PathError. +func CreateSequential(name string) (*os.File, error) { + return os.Create(name) +} + +// OpenSequential opens the named file for reading. 
If successful, methods on +// the returned file can be used for reading; the associated file +// descriptor has mode O_RDONLY. +// If there is an error, it will be of type *PathError. +func OpenSequential(name string) (*os.File, error) { + return os.Open(name) +} + +// OpenFileSequential is the generalized open call; most users will use Open +// or Create instead. It opens the named file with specified flag +// (O_RDONLY etc.) and perm, (0666 etc.) if applicable. If successful, +// methods on the returned File can be used for I/O. +// If there is an error, it will be of type *PathError. +func OpenFileSequential(name string, flag int, perm os.FileMode) (*os.File, error) { + return os.OpenFile(name, flag, perm) +} + +// TempFileSequential creates a new temporary file in the directory dir +// with a name beginning with prefix, opens the file for reading +// and writing, and returns the resulting *os.File. +// If dir is the empty string, TempFile uses the default directory +// for temporary files (see os.TempDir). +// Multiple programs calling TempFile simultaneously +// will not choose the same file. The caller can use f.Name() +// to find the pathname of the file. It is the caller's responsibility +// to remove the file when no longer needed. +func TempFileSequential(dir, prefix string) (f *os.File, err error) { + return ioutil.TempFile(dir, prefix) +} diff --git a/vendor/github.com/docker/docker/pkg/system/filesys_windows.go b/vendor/github.com/docker/docker/pkg/system/filesys_windows.go new file mode 100644 index 0000000..a1f6013 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/filesys_windows.go @@ -0,0 +1,296 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "os" + "path/filepath" + "regexp" + "strconv" + "strings" + "sync" + "syscall" + "time" + "unsafe" + + winio "github.com/Microsoft/go-winio" + "golang.org/x/sys/windows" +) + +const ( + // SddlAdministratorsLocalSystem is local administrators plus NT AUTHORITY\System + SddlAdministratorsLocalSystem = "D:P(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)" + // SddlNtvmAdministratorsLocalSystem is NT VIRTUAL MACHINE\Virtual Machines plus local administrators plus NT AUTHORITY\System + SddlNtvmAdministratorsLocalSystem = "D:P(A;OICI;GA;;;S-1-5-83-0)(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)" +) + +// MkdirAllWithACL is a wrapper for MkdirAll that creates a directory +// with an appropriate SDDL defined ACL. +func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error { + return mkdirall(path, true, sddl) +} + +// MkdirAll implementation that is volume path aware for Windows. +func MkdirAll(path string, _ os.FileMode, sddl string) error { + return mkdirall(path, false, sddl) +} + +// mkdirall is a custom version of os.MkdirAll modified for use on Windows +// so that it is both volume path aware, and can create a directory with +// a DACL. +func mkdirall(path string, applyACL bool, sddl string) error { + if re := regexp.MustCompile(`^\\\\\?\\Volume{[a-z0-9-]+}$`); re.MatchString(path) { + return nil + } + + // The rest of this method is largely copied from os.MkdirAll and should be kept + // as-is to ensure compatibility. + + // Fast path: if we can tell whether path is a directory or file, stop with success or error. + dir, err := os.Stat(path) + if err == nil { + if dir.IsDir() { + return nil + } + return &os.PathError{ + Op: "mkdir", + Path: path, + Err: syscall.ENOTDIR, + } + } + + // Slow path: make sure parent exists and then call Mkdir for path. 
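+	// The two loops below mirror os.MkdirAll: first strip any trailing
+	// path separators, then scan backwards over the last path element so
+	// that the parent prefix can be created recursively before this one.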
+	i := len(path)
+	for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator.
+		i--
+	}
+
+	j := i
+	for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element.
+		j--
+	}
+
+	if j > 1 {
+		// Create parent
+		err = mkdirall(path[0:j-1], false, sddl)
+		if err != nil {
+			return err
+		}
+	}
+
+	// Parent now exists; invoke os.Mkdir or mkdirWithACL and use its result.
+	if applyACL {
+		err = mkdirWithACL(path, sddl)
+	} else {
+		err = os.Mkdir(path, 0)
+	}
+
+	if err != nil {
+		// Handle arguments like "foo/." by
+		// double-checking that directory doesn't exist.
+		dir, err1 := os.Lstat(path)
+		if err1 == nil && dir.IsDir() {
+			return nil
+		}
+		return err
+	}
+	return nil
+}
+
+// mkdirWithACL creates a new directory. If there is an error, it will be of
+// type *PathError.
+//
+// This is a modified and combined version of os.Mkdir and windows.Mkdir
+// in golang to cater for creating a directory with an ACL permitting full
+// access, with inheritance, to any subfolder/file for Built-in Administrators
+// and Local System.
+func mkdirWithACL(name string, sddl string) error {
+	sa := windows.SecurityAttributes{Length: 0}
+	sd, err := winio.SddlToSecurityDescriptor(sddl)
+	if err != nil {
+		return &os.PathError{Op: "mkdir", Path: name, Err: err}
+	}
+	sa.Length = uint32(unsafe.Sizeof(sa))
+	sa.InheritHandle = 1
+	sa.SecurityDescriptor = uintptr(unsafe.Pointer(&sd[0]))
+
+	namep, err := windows.UTF16PtrFromString(name)
+	if err != nil {
+		return &os.PathError{Op: "mkdir", Path: name, Err: err}
+	}
+
+	e := windows.CreateDirectory(namep, &sa)
+	if e != nil {
+		return &os.PathError{Op: "mkdir", Path: name, Err: e}
+	}
+	return nil
+}
+
+// IsAbs is a platform-specific wrapper for filepath.IsAbs. On Windows,
+// golang filepath.IsAbs does not consider a path \windows\system32 as absolute
+// as it doesn't start with a drive-letter/colon combination. However, in
+// docker we need to verify things such as WORKDIR /windows/system32 in
+// a Dockerfile (which gets translated to \windows\system32 when being processed
+// by the daemon). This SHOULD be treated as absolute from a docker processing
+// perspective.
+func IsAbs(path string) bool {
+	if !filepath.IsAbs(path) {
+		if !strings.HasPrefix(path, string(os.PathSeparator)) {
+			return false
+		}
+	}
+	return true
+}
+
+// The origin of the functions below is the golang OS and windows packages,
+// slightly modified to only cope with files, not directories, due to the
+// specific use case.
+//
+// The alteration is to allow a file on Windows to be opened with
+// FILE_FLAG_SEQUENTIAL_SCAN (in particular for docker load), to avoid eating
+// the standby list, particularly when accessing large files such as layer.tar.
+
+// CreateSequential creates the named file with mode 0666 (before umask), truncating
+// it if it already exists. If successful, methods on the returned
+// File can be used for I/O; the associated file descriptor has mode
+// O_RDWR.
+// If there is an error, it will be of type *PathError.
+func CreateSequential(name string) (*os.File, error) {
+	return OpenFileSequential(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0)
+}
+
+// OpenSequential opens the named file for reading. If successful, methods on
+// the returned file can be used for reading; the associated file
+// descriptor has mode O_RDONLY.
+// If there is an error, it will be of type *PathError.
+func OpenSequential(name string) (*os.File, error) { + return OpenFileSequential(name, os.O_RDONLY, 0) +} + +// OpenFileSequential is the generalized open call; most users will use Open +// or Create instead. +// If there is an error, it will be of type *PathError. +func OpenFileSequential(name string, flag int, _ os.FileMode) (*os.File, error) { + if name == "" { + return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOENT} + } + r, errf := windowsOpenFileSequential(name, flag, 0) + if errf == nil { + return r, nil + } + return nil, &os.PathError{Op: "open", Path: name, Err: errf} +} + +func windowsOpenFileSequential(name string, flag int, _ os.FileMode) (file *os.File, err error) { + r, e := windowsOpenSequential(name, flag|windows.O_CLOEXEC, 0) + if e != nil { + return nil, e + } + return os.NewFile(uintptr(r), name), nil +} + +func makeInheritSa() *windows.SecurityAttributes { + var sa windows.SecurityAttributes + sa.Length = uint32(unsafe.Sizeof(sa)) + sa.InheritHandle = 1 + return &sa +} + +func windowsOpenSequential(path string, mode int, _ uint32) (fd windows.Handle, err error) { + if len(path) == 0 { + return windows.InvalidHandle, windows.ERROR_FILE_NOT_FOUND + } + pathp, err := windows.UTF16PtrFromString(path) + if err != nil { + return windows.InvalidHandle, err + } + var access uint32 + switch mode & (windows.O_RDONLY | windows.O_WRONLY | windows.O_RDWR) { + case windows.O_RDONLY: + access = windows.GENERIC_READ + case windows.O_WRONLY: + access = windows.GENERIC_WRITE + case windows.O_RDWR: + access = windows.GENERIC_READ | windows.GENERIC_WRITE + } + if mode&windows.O_CREAT != 0 { + access |= windows.GENERIC_WRITE + } + if mode&windows.O_APPEND != 0 { + access &^= windows.GENERIC_WRITE + access |= windows.FILE_APPEND_DATA + } + sharemode := uint32(windows.FILE_SHARE_READ | windows.FILE_SHARE_WRITE) + var sa *windows.SecurityAttributes + if mode&windows.O_CLOEXEC == 0 { + sa = makeInheritSa() + } + var createmode uint32 + switch { + case mode&(windows.O_CREAT|windows.O_EXCL) == (windows.O_CREAT | windows.O_EXCL): + createmode = windows.CREATE_NEW + case mode&(windows.O_CREAT|windows.O_TRUNC) == (windows.O_CREAT | windows.O_TRUNC): + createmode = windows.CREATE_ALWAYS + case mode&windows.O_CREAT == windows.O_CREAT: + createmode = windows.OPEN_ALWAYS + case mode&windows.O_TRUNC == windows.O_TRUNC: + createmode = windows.TRUNCATE_EXISTING + default: + createmode = windows.OPEN_EXISTING + } + // Use FILE_FLAG_SEQUENTIAL_SCAN rather than FILE_ATTRIBUTE_NORMAL as implemented in golang. + //https://msdn.microsoft.com/en-us/library/windows/desktop/aa363858(v=vs.85).aspx + const fileFlagSequentialScan = 0x08000000 // FILE_FLAG_SEQUENTIAL_SCAN + h, e := windows.CreateFile(pathp, access, sharemode, sa, createmode, fileFlagSequentialScan, 0) + return h, e +} + +// Helpers for TempFileSequential +var rand uint32 +var randmu sync.Mutex + +func reseed() uint32 { + return uint32(time.Now().UnixNano() + int64(os.Getpid())) +} +func nextSuffix() string { + randmu.Lock() + r := rand + if r == 0 { + r = reseed() + } + r = r*1664525 + 1013904223 // constants from Numerical Recipes + rand = r + randmu.Unlock() + return strconv.Itoa(int(1e9 + r%1e9))[1:] +} + +// TempFileSequential is a copy of ioutil.TempFile, modified to use sequential +// file access. Below is the original comment from golang: +// TempFile creates a new temporary file in the directory dir +// with a name beginning with prefix, opens the file for reading +// and writing, and returns the resulting *os.File. 
+// If dir is the empty string, TempFile uses the default directory +// for temporary files (see os.TempDir). +// Multiple programs calling TempFile simultaneously +// will not choose the same file. The caller can use f.Name() +// to find the pathname of the file. It is the caller's responsibility +// to remove the file when no longer needed. +func TempFileSequential(dir, prefix string) (f *os.File, err error) { + if dir == "" { + dir = os.TempDir() + } + + nconflict := 0 + for i := 0; i < 10000; i++ { + name := filepath.Join(dir, prefix+nextSuffix()) + f, err = OpenFileSequential(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) + if os.IsExist(err) { + if nconflict++; nconflict > 10 { + randmu.Lock() + rand = reseed() + randmu.Unlock() + } + continue + } + break + } + return +} diff --git a/vendor/github.com/docker/docker/pkg/system/init.go b/vendor/github.com/docker/docker/pkg/system/init.go new file mode 100644 index 0000000..a17597a --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/init.go @@ -0,0 +1,22 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "syscall" + "time" + "unsafe" +) + +// Used by chtimes +var maxTime time.Time + +func init() { + // chtimes initialization + if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 { + // This is a 64 bit timespec + // os.Chtimes limits time to the following + maxTime = time.Unix(0, 1<<63-1) + } else { + // This is a 32 bit timespec + maxTime = time.Unix(1<<31-1, 0) + } +} diff --git a/vendor/github.com/docker/docker/pkg/system/init_unix.go b/vendor/github.com/docker/docker/pkg/system/init_unix.go new file mode 100644 index 0000000..4996a67 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/init_unix.go @@ -0,0 +1,7 @@ +// +build !windows + +package system // import "github.com/docker/docker/pkg/system" + +// InitLCOW does nothing since LCOW is a windows only feature +func InitLCOW(experimental bool) { +} diff --git a/vendor/github.com/docker/docker/pkg/system/init_windows.go b/vendor/github.com/docker/docker/pkg/system/init_windows.go new file mode 100644 index 0000000..4910ff6 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/init_windows.go @@ -0,0 +1,12 @@ +package system // import "github.com/docker/docker/pkg/system" + +// lcowSupported determines if Linux Containers on Windows are supported. +var lcowSupported = false + +// InitLCOW sets whether LCOW is supported or not +func InitLCOW(experimental bool) { + v := GetOSVersion() + if experimental && v.Build >= 16299 { + lcowSupported = true + } +} diff --git a/vendor/github.com/docker/docker/pkg/system/lcow.go b/vendor/github.com/docker/docker/pkg/system/lcow.go new file mode 100644 index 0000000..5c3fbfe --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/lcow.go @@ -0,0 +1,69 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "fmt" + "runtime" + "strings" + + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +// ValidatePlatform determines if a platform structure is valid. +// TODO This is a temporary function - can be replaced by parsing from +// https://github.com/containerd/containerd/pull/1403/files at a later date. +// @jhowardmsft +func ValidatePlatform(platform *specs.Platform) error { + platform.Architecture = strings.ToLower(platform.Architecture) + platform.OS = strings.ToLower(platform.OS) + // Based on https://github.com/moby/moby/pull/34642#issuecomment-330375350, do + // not support anything except operating system. 
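+	// Accordingly, every field other than OS must be empty; any non-empty
+	// architecture, osfeatures, osversion or variant is rejected below.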
+	if platform.Architecture != "" {
+		return fmt.Errorf("invalid platform architecture %q", platform.Architecture)
+	}
+	if platform.OS != "" {
+		if !(platform.OS == runtime.GOOS || (LCOWSupported() && platform.OS == "linux")) {
+			return fmt.Errorf("invalid platform os %q", platform.OS)
+		}
+	}
+	if len(platform.OSFeatures) != 0 {
+		return fmt.Errorf("invalid platform osfeatures %q", platform.OSFeatures)
+	}
+	if platform.OSVersion != "" {
+		return fmt.Errorf("invalid platform osversion %q", platform.OSVersion)
+	}
+	if platform.Variant != "" {
+		return fmt.Errorf("invalid platform variant %q", platform.Variant)
+	}
+	return nil
+}
+
+// ParsePlatform parses a platform string in the format os[/arch[/variant]]
+// into an OCI image-spec platform structure.
+// TODO This is a temporary function - can be replaced by parsing from
+// https://github.com/containerd/containerd/pull/1403/files at a later date.
+// @jhowardmsft
+func ParsePlatform(in string) *specs.Platform {
+	p := &specs.Platform{}
+	elements := strings.SplitN(strings.ToLower(in), "/", 3)
+	if len(elements) == 3 {
+		p.Variant = elements[2]
+	}
+	if len(elements) >= 2 {
+		p.Architecture = elements[1]
+	}
+	if len(elements) >= 1 {
+		p.OS = elements[0]
+	}
+	return p
+}
+
+// IsOSSupported determines if an operating system is supported by the host.
+func IsOSSupported(os string) bool {
+	if runtime.GOOS == os {
+		return true
+	}
+	if LCOWSupported() && os == "linux" {
+		return true
+	}
+	return false
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/lcow_unix.go b/vendor/github.com/docker/docker/pkg/system/lcow_unix.go
new file mode 100644
index 0000000..26397fb
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/lcow_unix.go
@@ -0,0 +1,8 @@
+// +build !windows
+
+package system // import "github.com/docker/docker/pkg/system"
+
+// LCOWSupported returns true if Linux containers on Windows are supported.
+func LCOWSupported() bool {
+	return false
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/lcow_windows.go b/vendor/github.com/docker/docker/pkg/system/lcow_windows.go
new file mode 100644
index 0000000..f0139df
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/lcow_windows.go
@@ -0,0 +1,6 @@
+package system // import "github.com/docker/docker/pkg/system"
+
+// LCOWSupported returns true if Linux containers on Windows are supported.
+func LCOWSupported() bool {
+	return lcowSupported
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/lstat_unix.go b/vendor/github.com/docker/docker/pkg/system/lstat_unix.go
new file mode 100644
index 0000000..7477995
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/lstat_unix.go
@@ -0,0 +1,19 @@
+// +build !windows
+
+package system // import "github.com/docker/docker/pkg/system"
+
+import (
+	"syscall"
+)
+
+// Lstat takes a path to a file and returns
+// a system.StatT type pertaining to that file.
+//
+// Throws an error if the file does not exist
+func Lstat(path string) (*StatT, error) {
+	s := &syscall.Stat_t{}
+	if err := syscall.Lstat(path, s); err != nil {
+		return nil, err
+	}
+	return fromStatT(s)
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/lstat_windows.go b/vendor/github.com/docker/docker/pkg/system/lstat_windows.go
new file mode 100644
index 0000000..359c791
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/lstat_windows.go
@@ -0,0 +1,14 @@
+package system // import "github.com/docker/docker/pkg/system"
+
+import "os"
+
+// Lstat calls os.Lstat to get a fileinfo interface back.
+// This is then copied into our own locally defined structure. +func Lstat(path string) (*StatT, error) { + fi, err := os.Lstat(path) + if err != nil { + return nil, err + } + + return fromStatT(&fi) +} diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo.go b/vendor/github.com/docker/docker/pkg/system/meminfo.go new file mode 100644 index 0000000..6667eb8 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/meminfo.go @@ -0,0 +1,17 @@ +package system // import "github.com/docker/docker/pkg/system" + +// MemInfo contains memory statistics of the host system. +type MemInfo struct { + // Total usable RAM (i.e. physical RAM minus a few reserved bits and the + // kernel binary code). + MemTotal int64 + + // Amount of free memory. + MemFree int64 + + // Total amount of swap space available. + SwapTotal int64 + + // Amount of swap space that is currently unused. + SwapFree int64 +} diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go b/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go new file mode 100644 index 0000000..d79e8b0 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go @@ -0,0 +1,65 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "bufio" + "io" + "os" + "strconv" + "strings" + + "github.com/docker/go-units" +) + +// ReadMemInfo retrieves memory statistics of the host system and returns a +// MemInfo type. +func ReadMemInfo() (*MemInfo, error) { + file, err := os.Open("/proc/meminfo") + if err != nil { + return nil, err + } + defer file.Close() + return parseMemInfo(file) +} + +// parseMemInfo parses the /proc/meminfo file into +// a MemInfo object given an io.Reader to the file. +// Throws error if there are problems reading from the file +func parseMemInfo(reader io.Reader) (*MemInfo, error) { + meminfo := &MemInfo{} + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + // Expected format: ["MemTotal:", "1234", "kB"] + parts := strings.Fields(scanner.Text()) + + // Sanity checks: Skip malformed entries. + if len(parts) < 3 || parts[2] != "kB" { + continue + } + + // Convert to bytes. + size, err := strconv.Atoi(parts[1]) + if err != nil { + continue + } + bytes := int64(size) * units.KiB + + switch parts[0] { + case "MemTotal:": + meminfo.MemTotal = bytes + case "MemFree:": + meminfo.MemFree = bytes + case "SwapTotal:": + meminfo.SwapTotal = bytes + case "SwapFree:": + meminfo.SwapFree = bytes + } + + } + + // Handle errors that may have occurred during the reading of the file. + if err := scanner.Err(); err != nil { + return nil, err + } + + return meminfo, nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go b/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go new file mode 100644 index 0000000..56f4494 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go @@ -0,0 +1,8 @@ +// +build !linux,!windows + +package system // import "github.com/docker/docker/pkg/system" + +// ReadMemInfo is not supported on platforms other than linux and windows. 
+func ReadMemInfo() (*MemInfo, error) { + return nil, ErrNotSupportedPlatform +} diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go b/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go new file mode 100644 index 0000000..6ed93f2 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go @@ -0,0 +1,45 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "unsafe" + + "golang.org/x/sys/windows" +) + +var ( + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + + procGlobalMemoryStatusEx = modkernel32.NewProc("GlobalMemoryStatusEx") +) + +// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366589(v=vs.85).aspx +// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366770(v=vs.85).aspx +type memorystatusex struct { + dwLength uint32 + dwMemoryLoad uint32 + ullTotalPhys uint64 + ullAvailPhys uint64 + ullTotalPageFile uint64 + ullAvailPageFile uint64 + ullTotalVirtual uint64 + ullAvailVirtual uint64 + ullAvailExtendedVirtual uint64 +} + +// ReadMemInfo retrieves memory statistics of the host system and returns a +// MemInfo type. +func ReadMemInfo() (*MemInfo, error) { + msi := &memorystatusex{ + dwLength: 64, + } + r1, _, _ := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(msi))) + if r1 == 0 { + return &MemInfo{}, nil + } + return &MemInfo{ + MemTotal: int64(msi.ullTotalPhys), + MemFree: int64(msi.ullAvailPhys), + SwapTotal: int64(msi.ullTotalPageFile), + SwapFree: int64(msi.ullAvailPageFile), + }, nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/mknod.go b/vendor/github.com/docker/docker/pkg/system/mknod.go new file mode 100644 index 0000000..b132482 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/mknod.go @@ -0,0 +1,22 @@ +// +build !windows + +package system // import "github.com/docker/docker/pkg/system" + +import ( + "golang.org/x/sys/unix" +) + +// Mknod creates a filesystem node (file, device special file or named pipe) named path +// with attributes specified by mode and dev. +func Mknod(path string, mode uint32, dev int) error { + return unix.Mknod(path, mode, dev) +} + +// Mkdev is used to build the value of linux devices (in /dev/) which specifies major +// and minor number of the newly created device special file. +// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes. +// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major, +// then the top 12 bits of the minor. +func Mkdev(major int64, minor int64) uint32 { + return uint32(unix.Mkdev(uint32(major), uint32(minor))) +} diff --git a/vendor/github.com/docker/docker/pkg/system/mknod_windows.go b/vendor/github.com/docker/docker/pkg/system/mknod_windows.go new file mode 100644 index 0000000..ec89d7a --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/mknod_windows.go @@ -0,0 +1,11 @@ +package system // import "github.com/docker/docker/pkg/system" + +// Mknod is not implemented on Windows. +func Mknod(path string, mode uint32, dev int) error { + return ErrNotSupportedPlatform +} + +// Mkdev is not implemented on Windows. 
+func Mkdev(major int64, minor int64) uint32 {
+	panic("Mkdev not implemented on Windows.")
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/path.go b/vendor/github.com/docker/docker/pkg/system/path.go
new file mode 100644
index 0000000..a3d957a
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/path.go
@@ -0,0 +1,60 @@
+package system // import "github.com/docker/docker/pkg/system"
+
+import (
+	"fmt"
+	"path/filepath"
+	"runtime"
+	"strings"
+
+	"github.com/containerd/continuity/pathdriver"
+)
+
+const defaultUnixPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+
+// DefaultPathEnv is a unix style list of directories to search for
+// executables. Each directory is separated from the next by a colon
+// ':' character.
+func DefaultPathEnv(os string) string {
+	if runtime.GOOS == "windows" {
+		if os != runtime.GOOS {
+			return defaultUnixPathEnv
+		}
+		// Deliberately empty on Windows containers on Windows as the default path will be set by
+		// the container. Docker has no context of what the default path should be.
+		return ""
+	}
+	return defaultUnixPathEnv
+}
+
+// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter,
+// is the system drive.
+// On Linux: this is a no-op.
+// On Windows: it verifies and manipulates the path. This is used, for example,
+// when validating a user provided path in docker cp. If a drive letter is
+// supplied, it must be the system drive. The drive letter is always removed.
+// Also, it translates the path to OS semantics (IOW / to \). We need the path
+// in this syntax so that it can ultimately be concatenated with a Windows
+// long-path which doesn't support drive-letters. Examples:
+// C:      --> Fail
+// C:\     --> \
+// a       --> a
+// /a      --> \a
+// d:\     --> Fail
+func CheckSystemDriveAndRemoveDriveLetter(path string, driver pathdriver.PathDriver) (string, error) {
+	if runtime.GOOS != "windows" || LCOWSupported() {
+		return path, nil
+	}
+
+	if len(path) == 2 && string(path[1]) == ":" {
+		return "", fmt.Errorf("No relative path specified in %q", path)
+	}
+	if !driver.IsAbs(path) || len(path) < 2 {
+		return filepath.FromSlash(path), nil
+	}
+	if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") {
+		return "", fmt.Errorf("The specified path is not on the system drive (C:)")
+	}
+	return filepath.FromSlash(path[2:]), nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/process_unix.go b/vendor/github.com/docker/docker/pkg/system/process_unix.go
new file mode 100644
index 0000000..0195a89
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/process_unix.go
@@ -0,0 +1,24 @@
+// +build linux freebsd darwin
+
+package system // import "github.com/docker/docker/pkg/system"
+
+import (
+	"syscall"
+
+	"golang.org/x/sys/unix"
+)
+
+// IsProcessAlive returns true if the process with the given pid is running.
+func IsProcessAlive(pid int) bool {
+	err := unix.Kill(pid, syscall.Signal(0))
+	if err == nil || err == unix.EPERM {
+		return true
+	}
+
+	return false
+}
+
+// KillProcess force-stops a process.
+func KillProcess(pid int) {
+	unix.Kill(pid, unix.SIGKILL)
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/process_windows.go b/vendor/github.com/docker/docker/pkg/system/process_windows.go
new file mode 100644
index 0000000..4e70c97
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/process_windows.go
@@ -0,0 +1,18 @@
+package system // import "github.com/docker/docker/pkg/system"
+
+import "os"
+
+// IsProcessAlive returns true if the process with the given pid is running.
+func IsProcessAlive(pid int) bool {
+	_, err := os.FindProcess(pid)
+
+	return err == nil
+}
+
+// KillProcess force-stops a process.
+func KillProcess(pid int) {
+	p, err := os.FindProcess(pid)
+	if err == nil {
+		p.Kill()
+	}
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/rm.go b/vendor/github.com/docker/docker/pkg/system/rm.go
new file mode 100644
index 0000000..02e4d26
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/rm.go
@@ -0,0 +1,80 @@
+package system // import "github.com/docker/docker/pkg/system"
+
+import (
+	"os"
+	"syscall"
+	"time"
+
+	"github.com/docker/docker/pkg/mount"
+	"github.com/pkg/errors"
+)
+
+// EnsureRemoveAll wraps `os.RemoveAll` to check for specific errors that can
+// often be remedied.
+// Only use `EnsureRemoveAll` if you really want to make every effort to remove
+// a directory.
+//
+// Because of the way `os.Remove` (and by extension `os.RemoveAll`) works, there
+// can be a race between reading directory entries and then actually attempting
+// to remove everything in the directory.
+// These types of errors do not need to be returned since it's ok for the dir to
+// be gone; we can just retry the remove operation.
+//
+// This should not return an `os.ErrNotExist` kind of error under any circumstances
+func EnsureRemoveAll(dir string) error {
+	notExistErr := make(map[string]bool)
+
+	// track retries
+	exitOnErr := make(map[string]int)
+	maxRetry := 50
+
+	// Attempt to unmount anything beneath this dir first
+	mount.RecursiveUnmount(dir)
+
+	for {
+		err := os.RemoveAll(dir)
+		if err == nil {
+			return err
+		}
+
+		pe, ok := err.(*os.PathError)
+		if !ok {
+			return err
+		}
+
+		if os.IsNotExist(err) {
+			if notExistErr[pe.Path] {
+				return err
+			}
+			notExistErr[pe.Path] = true
+
+			// There is a race where some subdir can be removed but after the parent
+			// dir entries have been read.
+			// So the path could be from `os.Remove(subdir)`.
+			// If the reported non-existent path is not the passed in `dir` we
+			// should just retry, but otherwise return with no error.
+ if pe.Path == dir { + return nil + } + continue + } + + if pe.Err != syscall.EBUSY { + return err + } + + if mounted, _ := mount.Mounted(pe.Path); mounted { + if e := mount.Unmount(pe.Path); e != nil { + if mounted, _ := mount.Mounted(pe.Path); mounted { + return errors.Wrapf(e, "error while removing %s", dir) + } + } + } + + if exitOnErr[pe.Path] == maxRetry { + return err + } + exitOnErr[pe.Path]++ + time.Sleep(100 * time.Millisecond) + } +} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_darwin.go b/vendor/github.com/docker/docker/pkg/system/stat_darwin.go new file mode 100644 index 0000000..c1c0ee9 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/stat_darwin.go @@ -0,0 +1,13 @@ +package system // import "github.com/docker/docker/pkg/system" + +import "syscall" + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtimespec}, nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_freebsd.go b/vendor/github.com/docker/docker/pkg/system/stat_freebsd.go new file mode 100644 index 0000000..c1c0ee9 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/stat_freebsd.go @@ -0,0 +1,13 @@ +package system // import "github.com/docker/docker/pkg/system" + +import "syscall" + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtimespec}, nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_linux.go b/vendor/github.com/docker/docker/pkg/system/stat_linux.go new file mode 100644 index 0000000..98c9eb1 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/stat_linux.go @@ -0,0 +1,19 @@ +package system // import "github.com/docker/docker/pkg/system" + +import "syscall" + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: s.Mode, + uid: s.Uid, + gid: s.Gid, + rdev: s.Rdev, + mtim: s.Mtim}, nil +} + +// FromStatT converts a syscall.Stat_t type to a system.Stat_t type +// This is exposed on Linux as pkg/archive/changes uses it. 
+func FromStatT(s *syscall.Stat_t) (*StatT, error) { + return fromStatT(s) +} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go b/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go new file mode 100644 index 0000000..756b92d --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go @@ -0,0 +1,13 @@ +package system // import "github.com/docker/docker/pkg/system" + +import "syscall" + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtim}, nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_solaris.go b/vendor/github.com/docker/docker/pkg/system/stat_solaris.go new file mode 100644 index 0000000..756b92d --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/stat_solaris.go @@ -0,0 +1,13 @@ +package system // import "github.com/docker/docker/pkg/system" + +import "syscall" + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtim}, nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_unix.go b/vendor/github.com/docker/docker/pkg/system/stat_unix.go new file mode 100644 index 0000000..3d7e2eb --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/stat_unix.go @@ -0,0 +1,65 @@ +// +build !windows + +package system // import "github.com/docker/docker/pkg/system" + +import ( + "syscall" +) + +// StatT type contains status of a file. It contains metadata +// like permission, owner, group, size, etc about a file. +type StatT struct { + mode uint32 + uid uint32 + gid uint32 + rdev uint64 + size int64 + mtim syscall.Timespec +} + +// Mode returns file's permission mode. +func (s StatT) Mode() uint32 { + return s.mode +} + +// UID returns file's user id of owner. +func (s StatT) UID() uint32 { + return s.uid +} + +// GID returns file's group id of owner. +func (s StatT) GID() uint32 { + return s.gid +} + +// Rdev returns file's device ID (if it's special file). +func (s StatT) Rdev() uint64 { + return s.rdev +} + +// Size returns file's size. +func (s StatT) Size() int64 { + return s.size +} + +// Mtim returns file's last modification time. +func (s StatT) Mtim() syscall.Timespec { + return s.mtim +} + +// IsDir reports whether s describes a directory. +func (s StatT) IsDir() bool { + return s.mode&syscall.S_IFDIR != 0 +} + +// Stat takes a path to a file and returns +// a system.StatT type pertaining to that file. +// +// Throws an error if the file does not exist +func Stat(path string) (*StatT, error) { + s := &syscall.Stat_t{} + if err := syscall.Stat(path, s); err != nil { + return nil, err + } + return fromStatT(s) +} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_windows.go b/vendor/github.com/docker/docker/pkg/system/stat_windows.go new file mode 100644 index 0000000..b2456cb --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/stat_windows.go @@ -0,0 +1,49 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "os" + "time" +) + +// StatT type contains status of a file. It contains metadata +// like permission, size, etc about a file. +type StatT struct { + mode os.FileMode + size int64 + mtim time.Time +} + +// Size returns file's size. 
+func (s StatT) Size() int64 { + return s.size +} + +// Mode returns file's permission mode. +func (s StatT) Mode() os.FileMode { + return os.FileMode(s.mode) +} + +// Mtim returns file's last modification time. +func (s StatT) Mtim() time.Time { + return time.Time(s.mtim) +} + +// Stat takes a path to a file and returns +// a system.StatT type pertaining to that file. +// +// Throws an error if the file does not exist +func Stat(path string) (*StatT, error) { + fi, err := os.Stat(path) + if err != nil { + return nil, err + } + return fromStatT(&fi) +} + +// fromStatT converts a os.FileInfo type to a system.StatT type +func fromStatT(fi *os.FileInfo) (*StatT, error) { + return &StatT{ + size: (*fi).Size(), + mode: (*fi).Mode(), + mtim: (*fi).ModTime()}, nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/syscall_unix.go b/vendor/github.com/docker/docker/pkg/system/syscall_unix.go new file mode 100644 index 0000000..919a412 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/syscall_unix.go @@ -0,0 +1,17 @@ +// +build linux freebsd + +package system // import "github.com/docker/docker/pkg/system" + +import "golang.org/x/sys/unix" + +// Unmount is a platform-specific helper function to call +// the unmount syscall. +func Unmount(dest string) error { + return unix.Unmount(dest, 0) +} + +// CommandLineToArgv should not be used on Unix. +// It simply returns commandLine in the only element in the returned array. +func CommandLineToArgv(commandLine string) ([]string, error) { + return []string{commandLine}, nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/syscall_windows.go b/vendor/github.com/docker/docker/pkg/system/syscall_windows.go new file mode 100644 index 0000000..ee7e025 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/syscall_windows.go @@ -0,0 +1,127 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "fmt" + "unsafe" + + "github.com/sirupsen/logrus" + "golang.org/x/sys/windows" +) + +var ( + ntuserApiset = windows.NewLazyDLL("ext-ms-win-ntuser-window-l1-1-0") + procGetVersionExW = modkernel32.NewProc("GetVersionExW") + procGetProductInfo = modkernel32.NewProc("GetProductInfo") +) + +// OSVersion is a wrapper for Windows version information +// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx +type OSVersion struct { + Version uint32 + MajorVersion uint8 + MinorVersion uint8 + Build uint16 +} + +// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724833(v=vs.85).aspx +type osVersionInfoEx struct { + OSVersionInfoSize uint32 + MajorVersion uint32 + MinorVersion uint32 + BuildNumber uint32 + PlatformID uint32 + CSDVersion [128]uint16 + ServicePackMajor uint16 + ServicePackMinor uint16 + SuiteMask uint16 + ProductType byte + Reserve byte +} + +// GetOSVersion gets the operating system version on Windows. Note that +// docker.exe must be manifested to get the correct version information. +func GetOSVersion() OSVersion { + var err error + osv := OSVersion{} + osv.Version, err = windows.GetVersion() + if err != nil { + // GetVersion never fails. 
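+		// A failure here therefore indicates a bug, so fail loudly rather
+		// than return a zero-valued OSVersion.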
+ panic(err) + } + osv.MajorVersion = uint8(osv.Version & 0xFF) + osv.MinorVersion = uint8(osv.Version >> 8 & 0xFF) + osv.Build = uint16(osv.Version >> 16) + return osv +} + +func (osv OSVersion) ToString() string { + return fmt.Sprintf("%d.%d.%d", osv.MajorVersion, osv.MinorVersion, osv.Build) +} + +// IsWindowsClient returns true if the SKU is client +// @engine maintainers - this function should not be removed or modified as it +// is used to enforce licensing restrictions on Windows. +func IsWindowsClient() bool { + osviex := &osVersionInfoEx{OSVersionInfoSize: 284} + r1, _, err := procGetVersionExW.Call(uintptr(unsafe.Pointer(osviex))) + if r1 == 0 { + logrus.Warnf("GetVersionExW failed - assuming server SKU: %v", err) + return false + } + const verNTWorkstation = 0x00000001 + return osviex.ProductType == verNTWorkstation +} + +// IsIoTCore returns true if the currently running image is based off of +// Windows 10 IoT Core. +// @engine maintainers - this function should not be removed or modified as it +// is used to enforce licensing restrictions on Windows. +func IsIoTCore() bool { + var returnedProductType uint32 + r1, _, err := procGetProductInfo.Call(6, 1, 0, 0, uintptr(unsafe.Pointer(&returnedProductType))) + if r1 == 0 { + logrus.Warnf("GetProductInfo failed - assuming this is not IoT: %v", err) + return false + } + const productIoTUAP = 0x0000007B + const productIoTUAPCommercial = 0x00000083 + return returnedProductType == productIoTUAP || returnedProductType == productIoTUAPCommercial +} + +// Unmount is a platform-specific helper function to call +// the unmount syscall. Not supported on Windows +func Unmount(dest string) error { + return nil +} + +// CommandLineToArgv wraps the Windows syscall to turn a commandline into an argument array. +func CommandLineToArgv(commandLine string) ([]string, error) { + var argc int32 + + argsPtr, err := windows.UTF16PtrFromString(commandLine) + if err != nil { + return nil, err + } + + argv, err := windows.CommandLineToArgv(argsPtr, &argc) + if err != nil { + return nil, err + } + defer windows.LocalFree(windows.Handle(uintptr(unsafe.Pointer(argv)))) + + newArgs := make([]string, argc) + for i, v := range (*argv)[:argc] { + newArgs[i] = string(windows.UTF16ToString((*v)[:])) + } + + return newArgs, nil +} + +// HasWin32KSupport determines whether containers that depend on win32k can +// run on this machine. Win32k is the driver used to implement windowing. +func HasWin32KSupport() bool { + // For now, check for ntuser API support on the host. In the future, a host + // may support win32k in containers even if the host does not support ntuser + // APIs. + return ntuserApiset.Load() == nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/umask.go b/vendor/github.com/docker/docker/pkg/system/umask.go new file mode 100644 index 0000000..9912a2b --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/umask.go @@ -0,0 +1,13 @@ +// +build !windows + +package system // import "github.com/docker/docker/pkg/system" + +import ( + "golang.org/x/sys/unix" +) + +// Umask sets current process's file mode creation mask to newmask +// and returns oldmask. 
+func Umask(newmask int) (oldmask int, err error) {
+	return unix.Umask(newmask), nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/umask_windows.go b/vendor/github.com/docker/docker/pkg/system/umask_windows.go
new file mode 100644
index 0000000..fc62388
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/umask_windows.go
@@ -0,0 +1,7 @@
+package system // import "github.com/docker/docker/pkg/system"
+
+// Umask is not supported on the windows platform.
+func Umask(newmask int) (oldmask int, err error) {
+	// should not be called on cli code path
+	return 0, ErrNotSupportedPlatform
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go b/vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go
new file mode 100644
index 0000000..ed1b9fa
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go
@@ -0,0 +1,24 @@
+package system // import "github.com/docker/docker/pkg/system"
+
+import (
+	"syscall"
+	"unsafe"
+
+	"golang.org/x/sys/unix"
+)
+
+// LUtimesNano is used to change access and modification time of the specified path.
+// It's used for symbolic link files because unix.UtimesNano doesn't support a NOFOLLOW flag at the moment.
+func LUtimesNano(path string, ts []syscall.Timespec) error {
+	var _path *byte
+	_path, err := unix.BytePtrFromString(path)
+	if err != nil {
+		return err
+	}
+
+	if _, _, err := unix.Syscall(unix.SYS_LUTIMES, uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), 0); err != 0 && err != unix.ENOSYS {
+		return err
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_linux.go b/vendor/github.com/docker/docker/pkg/system/utimes_linux.go
new file mode 100644
index 0000000..0afe854
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/utimes_linux.go
@@ -0,0 +1,25 @@
+package system // import "github.com/docker/docker/pkg/system"
+
+import (
+	"syscall"
+	"unsafe"
+
+	"golang.org/x/sys/unix"
+)
+
+// LUtimesNano is used to change access and modification time of the specified path.
+// It's used for symbolic link files because unix.UtimesNano doesn't support a NOFOLLOW flag at the moment.
+func LUtimesNano(path string, ts []syscall.Timespec) error {
+	atFdCwd := unix.AT_FDCWD
+
+	var _path *byte
+	_path, err := unix.BytePtrFromString(path)
+	if err != nil {
+		return err
+	}
+	if _, _, err := unix.Syscall6(unix.SYS_UTIMENSAT, uintptr(atFdCwd), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), unix.AT_SYMLINK_NOFOLLOW, 0, 0); err != 0 && err != unix.ENOSYS {
+		return err
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go b/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go
new file mode 100644
index 0000000..095e072
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go
@@ -0,0 +1,10 @@
+// +build !linux,!freebsd
+
+package system // import "github.com/docker/docker/pkg/system"
+
+import "syscall"
+
+// LUtimesNano is only supported on linux and freebsd.
+func LUtimesNano(path string, ts []syscall.Timespec) error {
+	return ErrNotSupportedPlatform
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go b/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go
new file mode 100644
index 0000000..66d4895
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go
@@ -0,0 +1,29 @@
+package system // import "github.com/docker/docker/pkg/system"
+
+import "golang.org/x/sys/unix"
+
+// Lgetxattr retrieves the value of the extended attribute identified by attr
+// and associated with the given path in the file system.
+// It returns a nil slice and nil error if the xattr is not set.
+func Lgetxattr(path string, attr string) ([]byte, error) {
+	dest := make([]byte, 128)
+	sz, errno := unix.Lgetxattr(path, attr, dest)
+	if errno == unix.ENODATA {
+		return nil, nil
+	}
+	if errno == unix.ERANGE {
+		dest = make([]byte, sz)
+		sz, errno = unix.Lgetxattr(path, attr, dest)
+	}
+	if errno != nil {
+		return nil, errno
+	}
+
+	return dest[:sz], nil
+}
+
+// Lsetxattr sets the value of the extended attribute identified by attr
+// and associated with the given path in the file system.
+func Lsetxattr(path string, attr string, data []byte, flags int) error {
+	return unix.Lsetxattr(path, attr, data, flags)
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go b/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go
new file mode 100644
index 0000000..d780a90
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go
@@ -0,0 +1,13 @@
+// +build !linux
+
+package system // import "github.com/docker/docker/pkg/system"
+
+// Lgetxattr is not supported on platforms other than linux.
+func Lgetxattr(path string, attr string) ([]byte, error) {
+	return nil, ErrNotSupportedPlatform
+}
+
+// Lsetxattr is not supported on platforms other than linux.
+func Lsetxattr(path string, attr string, data []byte, flags int) error {
+	return ErrNotSupportedPlatform
+}
diff --git a/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone.go b/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone.go
deleted file mode 100644
index e4dec3a..0000000
--- a/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// +build go1.8
-
-package tlsconfig
-
-import "crypto/tls"
-
-// Clone returns a clone of tls.Config. This function is provided for
-// compatibility for go1.7 that doesn't include this method in stdlib.
-func Clone(c *tls.Config) *tls.Config {
-	return c.Clone()
-}
diff --git a/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone_go16.go b/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone_go16.go
deleted file mode 100644
index 0b81665..0000000
--- a/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone_go16.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// +build go1.6,!go1.7
-
-package tlsconfig
-
-import "crypto/tls"
-
-// Clone returns a clone of tls.Config. This function is provided for
-// compatibility for go1.6 that doesn't include this method in stdlib.
-func Clone(c *tls.Config) *tls.Config { - return &tls.Config{ - Rand: c.Rand, - Time: c.Time, - Certificates: c.Certificates, - NameToCertificate: c.NameToCertificate, - GetCertificate: c.GetCertificate, - RootCAs: c.RootCAs, - NextProtos: c.NextProtos, - ServerName: c.ServerName, - ClientAuth: c.ClientAuth, - ClientCAs: c.ClientCAs, - InsecureSkipVerify: c.InsecureSkipVerify, - CipherSuites: c.CipherSuites, - PreferServerCipherSuites: c.PreferServerCipherSuites, - SessionTicketsDisabled: c.SessionTicketsDisabled, - SessionTicketKey: c.SessionTicketKey, - ClientSessionCache: c.ClientSessionCache, - MinVersion: c.MinVersion, - MaxVersion: c.MaxVersion, - CurvePreferences: c.CurvePreferences, - } -} diff --git a/vendor/github.com/docker/docker/project/CONTRIBUTORS.md b/vendor/github.com/docker/docker/project/CONTRIBUTORS.md deleted file mode 120000 index 44fcc63..0000000 --- a/vendor/github.com/docker/docker/project/CONTRIBUTORS.md +++ /dev/null @@ -1 +0,0 @@ -../CONTRIBUTING.md \ No newline at end of file diff --git a/vendor/github.com/docker/docker/vendor.conf b/vendor/github.com/docker/docker/vendor.conf new file mode 100644 index 0000000..83695a3 --- /dev/null +++ b/vendor/github.com/docker/docker/vendor.conf @@ -0,0 +1,158 @@ +# the following lines are in sorted order, FYI +github.com/Azure/go-ansiterm d6e3b3328b783f23731bc4d058875b0371ff8109 +github.com/Microsoft/hcsshim v0.6.11 +github.com/Microsoft/go-winio v0.4.6 +github.com/docker/libtrust 9cbd2a1374f46905c68a4eb3694a130610adc62a +github.com/go-check/check 4ed411733c5785b40214c70bce814c3a3a689609 https://github.com/cpuguy83/check.git +github.com/golang/gddo 9b12a26f3fbd7397dee4e20939ddca719d840d2a +github.com/gorilla/context v1.1 +github.com/gorilla/mux v1.1 +github.com/Microsoft/opengcs v0.3.6 +github.com/kr/pty 5cf931ef8f +github.com/mattn/go-shellwords v1.0.3 +github.com/sirupsen/logrus v1.0.3 +github.com/tchap/go-patricia v2.2.6 +github.com/vdemeester/shakers 24d7f1d6a71aa5d9cbe7390e4afb66b7eef9e1b3 +golang.org/x/net 5561cd9b4330353950f399814f427425c0a26fd2 +golang.org/x/sys 37707fdb30a5b38865cfb95e5aab41707daec7fd +github.com/docker/go-units 9e638d38cf6977a37a8ea0078f3ee75a7cdb2dd1 +github.com/docker/go-connections 7beb39f0b969b075d1325fecb092faf27fd357b6 +golang.org/x/text f72d8390a633d5dfb0cc84043294db9f6c935756 +github.com/pmezard/go-difflib v1.0.0 +github.com/gotestyourself/gotestyourself cf3a5ab914a2efa8bc838d09f5918c1d44d029 +github.com/google/go-cmp v0.2.0 + +github.com/RackSec/srslog 456df3a81436d29ba874f3590eeeee25d666f8a5 +github.com/imdario/mergo 0.2.1 +golang.org/x/sync fd80eb99c8f653c847d294a001bdf2a3a6f768f5 + +github.com/moby/buildkit aaff9d591ef128560018433fe61beb802e149de8 +github.com/tonistiigi/fsutil dea3a0da73aee887fc02142d995be764106ac5e2 + +#get libnetwork packages + +# When updating, also update LIBNETWORK_COMMIT in hack/dockerfile/install/proxy accordingly +github.com/docker/libnetwork eb6b2a57955e5c149d47c3973573216e8f8baa09 +github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9 +github.com/armon/go-radix e39d623f12e8e41c7b5529e9a9dd67a1e2261f80 +github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec +github.com/hashicorp/go-msgpack 71c2886f5a673a35f909803f38ece5810165097b +github.com/hashicorp/memberlist 3d8438da9589e7b608a83ffac1ef8211486bcb7c +github.com/sean-/seed e2103e2c35297fb7e17febb81e49b312087a2372 +github.com/hashicorp/go-sockaddr acd314c5781ea706c710d9ea70069fd2e110d61d +github.com/hashicorp/go-multierror fcdddc395df1ddf4247c69bd436e84cfa0733f7e 
+github.com/hashicorp/serf 598c54895cc5a7b1a24a398d635e8c0ea0959870 +github.com/docker/libkv 1d8431073ae03cdaedb198a89722f3aab6d418ef +github.com/vishvananda/netns 604eaf189ee867d8c147fafc28def2394e878d25 +github.com/vishvananda/netlink b2de5d10e38ecce8607e6b438b6d174f389a004e + +# When updating, consider updating TOMLV_COMMIT in hack/dockerfile/install/tomlv accordingly +github.com/BurntSushi/toml a368813c5e648fee92e5f6c30e3944ff9d5e8895 +github.com/samuel/go-zookeeper d0e0d8e11f318e000a8cc434616d69e329edc374 +github.com/deckarep/golang-set ef32fa3046d9f249d399f98ebaf9be944430fd1d +github.com/coreos/etcd v3.2.1 +github.com/coreos/go-semver v0.2.0 +github.com/ugorji/go f1f1a805ed361a0e078bb537e4ea78cd37dcf065 +github.com/hashicorp/consul v0.5.2 +github.com/boltdb/bolt fff57c100f4dea1905678da7e90d92429dff2904 +github.com/miekg/dns 75e6e86cc601825c5dbcd4e0c209eab180997cd7 +github.com/ishidawataru/sctp 07191f837fedd2f13d1ec7b5f885f0f3ec54b1cb + +# get graph and distribution packages +github.com/docker/distribution 83389a148052d74ac602f5f1d62f86ff2f3c4aa5 +github.com/vbatts/tar-split v0.10.2 +github.com/opencontainers/go-digest v1.0.0-rc1 + +# get go-zfs packages +github.com/mistifyio/go-zfs 22c9b32c84eb0d0c6f4043b6e90fc94073de92fa +github.com/pborman/uuid v1.0 + +google.golang.org/grpc v1.3.0 + +# When updating, also update RUNC_COMMIT in hack/dockerfile/install/runc accordingly +github.com/opencontainers/runc 4fc53a81fb7c994640722ac585fa9ca548971871 +github.com/opencontainers/runtime-spec v1.0.1 +github.com/opencontainers/image-spec v1.0.1 +github.com/seccomp/libseccomp-golang 32f571b70023028bd57d9288c20efbcb237f3ce0 + +# libcontainer deps (see src/github.com/opencontainers/runc/Godeps/Godeps.json) +github.com/coreos/go-systemd v15 +github.com/godbus/dbus v4.0.0 +github.com/syndtr/gocapability 2c00daeb6c3b45114c80ac44119e7b8801fdd852 +github.com/golang/protobuf 7a211bcf3bce0e3f1d74f9894916e6f116ae83b4 + +# gelf logging driver deps +github.com/Graylog2/go-gelf 4143646226541087117ff2f83334ea48b3201841 + +github.com/fluent/fluent-logger-golang v1.3.0 +# fluent-logger-golang deps +github.com/philhofer/fwd 98c11a7a6ec829d672b03833c3d69a7fae1ca972 +github.com/tinylib/msgp 3b556c64540842d4f82967be066a7f7fffc3adad + +# fsnotify +github.com/fsnotify/fsnotify 4da3e2cfbabc9f751898f250b49f2439785783a1 + +# awslogs deps +github.com/aws/aws-sdk-go v1.12.66 +github.com/go-ini/ini v1.25.4 +github.com/jmespath/go-jmespath 0b12d6b521d83fc7f755e7cfc1b1fbdd35a01a74 + +# logentries +github.com/bsphere/le_go 7a984a84b5492ae539b79b62fb4a10afc63c7bcf + +# gcplogs deps +golang.org/x/oauth2 96382aa079b72d8c014eb0c50f6c223d1e6a2de0 +google.golang.org/api 3cc2e591b550923a2c5f0ab5a803feda924d5823 +cloud.google.com/go 9d965e63e8cceb1b5d7977a202f0fcb8866d6525 +github.com/googleapis/gax-go da06d194a00e19ce00d9011a13931c3f6f6887c7 +google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944 + +# containerd +github.com/containerd/containerd 4ac4fd0b6a268fe6f38b2b2e32e40daa7e424fac +github.com/containerd/fifo fbfb6a11ec671efbe94ad1c12c2e98773f19e1e6 +github.com/containerd/continuity d8fb8589b0e8e85b8c8bbaa8840226d0dfeb7371 +github.com/containerd/cgroups fe281dd265766145e943a034aa41086474ea6130 +github.com/containerd/console 2748ece16665b45a47f884001d5831ec79703880 +github.com/containerd/go-runc 4f6e87ae043f859a38255247b49c9abc262d002f +github.com/containerd/typeurl f6943554a7e7e88b3c14aad190bf05932da84788 +github.com/dmcgowan/go-tar go1.10 +github.com/stevvooe/ttrpc d4528379866b0ce7e9d71f3eb96f0582fc374577 + +# 
cluster
+github.com/docker/swarmkit bd69f6e8e301645afd344913fa1ede53a0a111fb
+github.com/gogo/protobuf v0.4
+github.com/cloudflare/cfssl 7fb22c8cba7ecaf98e4082d22d65800cf45e042a
+github.com/fernet/fernet-go 1b2437bc582b3cfbb341ee5a29f8ef5b42912ff2
+github.com/google/certificate-transparency d90e65c3a07988180c5b1ece71791c0b6506826e
+golang.org/x/crypto 558b6879de74bc843225cde5686419267ff707ca
+golang.org/x/time a4bde12657593d5e90d0533a3e4fd95e635124cb
+github.com/hashicorp/go-memdb cb9a474f84cc5e41b273b20c6927680b2a8776ad
+github.com/hashicorp/go-immutable-radix 8e8ed81f8f0bf1bdd829593fdd5c29922c1ea990
+github.com/hashicorp/golang-lru a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4
+github.com/coreos/pkg fa29b1d70f0beaddd4c7021607cc3c3be8ce94b8
+github.com/pivotal-golang/clock 3fd3c1944c59d9742e1cd333672181cd1a6f9fa0
+github.com/prometheus/client_golang 52437c81da6b127a9925d17eb3a382a2e5fd395e
+github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
+github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
+github.com/prometheus/common ebdfc6da46522d58825777cf1f90490a5b1ef1d8
+github.com/prometheus/procfs abf152e5f3e97f2fafac028d2cc06c1feb87ffa5
+github.com/matttproud/golang_protobuf_extensions v1.0.0
+github.com/pkg/errors 839d9e913e063e28dfd0e6c7b7512793e0a48be9
+github.com/grpc-ecosystem/go-grpc-prometheus 6b7015e65d366bf3f19b2b2a000a831940f0f7e0
+
+# cli
+github.com/spf13/cobra v0.0.3
+github.com/spf13/pflag v1.0.1
+github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
+github.com/Nvveen/Gotty a8b993ba6abdb0e0c12b0125c603323a71c7790c https://github.com/ijc25/Gotty
+
+# metrics
+github.com/docker/go-metrics d466d4f6fd960e01820085bd7e1a24426ee7ef18
+
+github.com/opencontainers/selinux b29023b86e4a69d1b46b7e7b4e2b6fda03f0b9cd
+
+
+# archive/tar (for Go 1.10, see https://github.com/golang/go/issues/24787)
+# mkdir -p ./vendor/archive
+# git clone -b go-1.10 --depth=1 git@github.com:kolyshkin/go-tar.git ./vendor/archive/tar
+# vndr # to clean up test files
diff --git a/vendor/github.com/docker/go-connections/README.md b/vendor/github.com/docker/go-connections/README.md
new file mode 100644
index 0000000..d257e44
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/README.md
@@ -0,0 +1,13 @@
+[![GoDoc](https://godoc.org/github.com/docker/go-connections?status.svg)](https://godoc.org/github.com/docker/go-connections)
+
+# Introduction
+
+go-connections provides a common package to work with network connections.
+
+## Usage
+
+See the [docs in godoc](https://godoc.org/github.com/docker/go-connections) for examples and documentation.
+
+## License
+
+go-connections is licensed under the Apache License, Version 2.0. See [LICENSE](LICENSE) for the full license text.
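As a quick orientation for the go-connections hunks that follow, here is a minimal sketch of the nat helpers. Only the tail of SplitProtoPort is visible in this diff, so the tcp default for bare ports is assumed from the upstream package; the output comments are illustrative.

package main

import (
	"fmt"

	"github.com/docker/go-connections/nat"
)

func main() {
	// SplitProtoPort splits a "port/proto" specification into its
	// protocol and port components.
	proto, port := nat.SplitProtoPort("8080/sctp")
	fmt.Println(proto, port) // sctp 8080 (accepted once validateProto knows sctp)

	// A bare port is assumed to default to tcp, per the upstream package.
	proto, port = nat.SplitProtoPort("9090")
	fmt.Println(proto, port) // tcp 9090
}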
diff --git a/vendor/github.com/docker/go-connections/nat/nat.go b/vendor/github.com/docker/go-connections/nat/nat.go index 4d5f5ae..bb7e4e3 100644 --- a/vendor/github.com/docker/go-connections/nat/nat.go +++ b/vendor/github.com/docker/go-connections/nat/nat.go @@ -113,7 +113,7 @@ func SplitProtoPort(rawPort string) (string, string) { } func validateProto(proto string) bool { - for _, availableProto := range []string{"tcp", "udp"} { + for _, availableProto := range []string{"tcp", "udp", "sctp"} { if availableProto == proto { return true } diff --git a/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go b/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go index 9ca9745..1ff81c3 100644 --- a/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go +++ b/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go @@ -4,7 +4,6 @@ package tlsconfig import ( "crypto/x509" - ) // SystemCertPool returns an new empty cert pool, diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config.go b/vendor/github.com/docker/go-connections/tlsconfig/config.go index 1b31bbb..f11f166 100644 --- a/vendor/github.com/docker/go-connections/tlsconfig/config.go +++ b/vendor/github.com/docker/go-connections/tlsconfig/config.go @@ -65,22 +65,34 @@ var allTLSVersions = map[uint16]struct{}{ } // ServerDefault returns a secure-enough TLS configuration for the server TLS configuration. -func ServerDefault() *tls.Config { - return &tls.Config{ - // Avoid fallback to SSL protocols < TLS1.0 +func ServerDefault(ops ...func(*tls.Config)) *tls.Config { + tlsconfig := &tls.Config{ + // Avoid fallback by default to SSL protocols < TLS1.0 MinVersion: tls.VersionTLS10, PreferServerCipherSuites: true, CipherSuites: DefaultServerAcceptedCiphers, } + + for _, op := range ops { + op(tlsconfig) + } + + return tlsconfig } // ClientDefault returns a secure-enough TLS configuration for the client TLS configuration. -func ClientDefault() *tls.Config { - return &tls.Config{ +func ClientDefault(ops ...func(*tls.Config)) *tls.Config { + tlsconfig := &tls.Config{ // Prefer TLS1.2 as the client minimum MinVersion: tls.VersionTLS12, CipherSuites: clientCipherSuites, } + + for _, op := range ops { + op(tlsconfig) + } + + return tlsconfig } // certPool returns an X.509 certificate pool from `caFile`, the certificate file. diff --git a/vendor/github.com/docker/go-units/CONTRIBUTING.md b/vendor/github.com/docker/go-units/CONTRIBUTING.md deleted file mode 100644 index 9ea86d7..0000000 --- a/vendor/github.com/docker/go-units/CONTRIBUTING.md +++ /dev/null @@ -1,67 +0,0 @@ -# Contributing to go-units - -Want to hack on go-units? Awesome! Here are instructions to get you started. - -go-units is a part of the [Docker](https://www.docker.com) project, and follows -the same rules and principles. If you're already familiar with the way -Docker does things, you'll feel right at home. - -Otherwise, go read Docker's -[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md), -[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md), -[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and -[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md). - -### Sign your work - -The sign-off is a simple line at the end of the explanation for the patch. Your -signature certifies that you wrote the patch or otherwise have the right to pass -it on as an open-source patch. 
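The tlsconfig hunk above converts ServerDefault and ClientDefault into functional-option constructors. A minimal sketch of the call pattern this enables; the option closure is illustrative, not a helper shipped by the package.

package main

import (
	"crypto/tls"
	"fmt"

	"github.com/docker/go-connections/tlsconfig"
)

func main() {
	// Start from the library's server defaults, then tighten the
	// minimum TLS version via a caller-supplied option.
	cfg := tlsconfig.ServerDefault(func(c *tls.Config) {
		c.MinVersion = tls.VersionTLS12
	})
	fmt.Printf("server min version: %#x\n", cfg.MinVersion)

	// ClientDefault accepts the same variadic options.
	_ = tlsconfig.ClientDefault()
}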
The rules are pretty simple: if you can certify -the below (from [developercertificate.org](http://developercertificate.org/)): - -``` -Developer Certificate of Origin -Version 1.1 - -Copyright (C) 2004, 2006 The Linux Foundation and its contributors. -660 York Street, Suite 102, -San Francisco, CA 94110 USA - -Everyone is permitted to copy and distribute verbatim copies of this -license document, but changing it is not allowed. - -Developer's Certificate of Origin 1.1 - -By making a contribution to this project, I certify that: - -(a) The contribution was created in whole or in part by me and I - have the right to submit it under the open source license - indicated in the file; or - -(b) The contribution is based upon previous work that, to the best - of my knowledge, is covered under an appropriate open source - license and I have the right under that license to submit that - work with modifications, whether created in whole or in part - by me, under the same open source license (unless I am - permitted to submit under a different license), as indicated - in the file; or - -(c) The contribution was provided directly to me by some other - person who certified (a), (b) or (c) and I have not modified - it. - -(d) I understand and agree that this project and the contribution - are public and that a record of the contribution (including all - personal information I submit with it, including my sign-off) is - maintained indefinitely and may be redistributed consistent with - this project or the open source license(s) involved. -``` - -Then you just add a line to every git commit message: - - Signed-off-by: Joe Smith - -Use your real name (sorry, no pseudonyms or anonymous contributions.) - -If you set your `user.name` and `user.email` git configs, you can sign your -commit automatically with `git commit -s`. diff --git a/vendor/github.com/docker/go-units/MAINTAINERS b/vendor/github.com/docker/go-units/MAINTAINERS deleted file mode 100644 index 9b3b6b1..0000000 --- a/vendor/github.com/docker/go-units/MAINTAINERS +++ /dev/null @@ -1,46 +0,0 @@ -# go-units maintainers file -# -# This file describes who runs the docker/go-units project and how. -# This is a living document - if you see something out of date or missing, speak up! -# -# It is structured to be consumable by both humans and programs. -# To extract its contents programmatically, use any TOML-compliant parser. -# -# This file is compiled into the MAINTAINERS file in docker/opensource. -# -[Org] - [Org."Core maintainers"] - people = [ - "akihirosuda", - "dnephin", - "thajeztah", - "vdemeester", - ] - -[people] - -# A reference list of all people associated with the project. -# All other sections should refer to people by their canonical key -# in the people section. 
- - # ADD YOURSELF HERE IN ALPHABETICAL ORDER - - [people.akihirosuda] - Name = "Akihiro Suda" - Email = "suda.akihiro@lab.ntt.co.jp" - GitHub = "AkihiroSuda" - - [people.dnephin] - Name = "Daniel Nephin" - Email = "dnephin@gmail.com" - GitHub = "dnephin" - - [people.thajeztah] - Name = "Sebastiaan van Stijn" - Email = "github@gone.nl" - GitHub = "thaJeztah" - - [people.vdemeester] - Name = "Vincent Demeester" - Email = "vincent@sbr.pm" - GitHub = "vdemeester" \ No newline at end of file diff --git a/vendor/github.com/docker/go-units/circle.yml b/vendor/github.com/docker/go-units/circle.yml deleted file mode 100644 index 9043b35..0000000 --- a/vendor/github.com/docker/go-units/circle.yml +++ /dev/null @@ -1,11 +0,0 @@ -dependencies: - post: - # install golint - - go get github.com/golang/lint/golint - -test: - pre: - # run analysis before tests - - go vet ./... - - test -z "$(golint ./... | tee /dev/stderr)" - - test -z "$(gofmt -s -l . | tee /dev/stderr)" diff --git a/vendor/github.com/docker/go-units/duration.go b/vendor/github.com/docker/go-units/duration.go index ba02af2..544b5dd 100644 --- a/vendor/github.com/docker/go-units/duration.go +++ b/vendor/github.com/docker/go-units/duration.go @@ -18,9 +18,9 @@ func HumanDuration(d time.Duration) string { return fmt.Sprintf("%d seconds", seconds) } else if minutes := int(d.Minutes()); minutes == 1 { return "About a minute" - } else if minutes < 46 { + } else if minutes < 60 { return fmt.Sprintf("%d minutes", minutes) - } else if hours := int(d.Hours() + 0.5); hours == 1 { + } else if hours := int(d.Hours()); hours == 1 { return "About an hour" } else if hours < 48 { return fmt.Sprintf("%d hours", hours) diff --git a/vendor/github.com/docker/go-units/size.go b/vendor/github.com/docker/go-units/size.go index 85f6ab0..b6485ed 100644 --- a/vendor/github.com/docker/go-units/size.go +++ b/vendor/github.com/docker/go-units/size.go @@ -31,7 +31,7 @@ type unitMap map[string]int64 var ( decimalMap = unitMap{"k": KB, "m": MB, "g": GB, "t": TB, "p": PB} binaryMap = unitMap{"k": KiB, "m": MiB, "g": GiB, "t": TiB, "p": PiB} - sizeRegex = regexp.MustCompile(`^(\d+(\.\d+)*) ?([kKmMgGtTpP])?[iI]?[bB]?$`) + sizeRegex = regexp.MustCompile(`^(\d+(\.\d+)*) ?([kKmMgGtTpP])?[bB]?$`) ) var decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} @@ -58,7 +58,7 @@ func CustomSize(format string, size float64, base float64, _map []string) string // instead of 4 digit precision used in units.HumanSize. func HumanSizeWithPrecision(size float64, precision int) string { size, unit := getSizeAndUnit(size, 1000.0, decimapAbbrs) - return fmt.Sprintf("%.*g%s", precision, size, unit) + return fmt.Sprintf("%.*g %s", precision, size, unit) } // HumanSize returns a human-readable approximation of a size @@ -70,7 +70,7 @@ func HumanSize(size float64) string { // BytesSize returns a human-readable size in bytes, kibibytes, // mebibytes, gibibytes, or tebibytes (eg. "44kiB", "17MiB"). 
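The go-units hunks here and just below pin an older revision whose formatting differs from later releases: HumanDuration keeps reporting minutes up to 59, and the size formatters insert a space before the unit. A short sketch of the behavior this revision produces; the output comments are computed from the hunks, not taken from upstream docs.

package main

import (
	"fmt"
	"time"

	"github.com/docker/go-units"
)

func main() {
	// With the revision vendored here, 47 minutes stays "47 minutes";
	// later go-units revisions round this up to "About an hour".
	fmt.Println(units.HumanDuration(47 * time.Minute))

	// The size helpers now put a space before the unit suffix.
	fmt.Println(units.HumanSize(1234567)) // 1.235 MB
	fmt.Println(units.BytesSize(1234567)) // 1.177 MiB
}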
func BytesSize(size float64) string {
-	return CustomSize("%.4g%s", size, 1024.0, binaryAbbrs)
+	return CustomSize("%.4g %s", size, 1024.0, binaryAbbrs)
 }
 
 // FromHumanSize returns an integer from a human-readable specification of a
diff --git a/vendor/github.com/docker/docker/pkg/symlink/LICENSE.APACHE b/vendor/github.com/docker/libtrust/LICENSE
similarity index 99%
rename from vendor/github.com/docker/docker/pkg/symlink/LICENSE.APACHE
rename to vendor/github.com/docker/libtrust/LICENSE
index 34c4ea7..2744858 100644
--- a/vendor/github.com/docker/docker/pkg/symlink/LICENSE.APACHE
+++ b/vendor/github.com/docker/libtrust/LICENSE
@@ -176,7 +176,7 @@
    END OF TERMS AND CONDITIONS
 
-   Copyright 2014-2016 Docker, Inc.
+   Copyright 2014 Docker, Inc.
 
    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/docker/libtrust/README.md b/vendor/github.com/docker/libtrust/README.md
new file mode 100644
index 0000000..dcffb31
--- /dev/null
+++ b/vendor/github.com/docker/libtrust/README.md
@@ -0,0 +1,22 @@
+# libtrust
+
+> **WARNING** this library is no longer actively developed, and will be integrated
+> in the [docker/distribution](https://www.github.com/docker/distribution)
+> repository in the future.
+
+Libtrust is a library for managing authentication and authorization using public key cryptography.
+
+Authentication is handled using the identity attached to the public key.
+Libtrust provides multiple methods to prove possession of the private key associated with an identity.
+ - TLS x509 certificates
+ - Signature verification
+ - Key Challenge
+
+Authorization and access control is managed through a distributed trust graph.
+Trust servers are used as the authorities of the trust graph and allow caching portions of the graph for faster access.
+
+## Copyright and license
+
+Code and documentation copyright 2014 Docker, Inc. Code released under the Apache 2.0 license.
+Docs released under Creative Commons.
+
diff --git a/vendor/github.com/docker/libtrust/certificates.go b/vendor/github.com/docker/libtrust/certificates.go
new file mode 100644
index 0000000..3dcca33
--- /dev/null
+++ b/vendor/github.com/docker/libtrust/certificates.go
@@ -0,0 +1,175 @@
+package libtrust
+
+import (
+	"crypto/rand"
+	"crypto/x509"
+	"crypto/x509/pkix"
+	"encoding/pem"
+	"fmt"
+	"io/ioutil"
+	"math/big"
+	"net"
+	"time"
+)
+
+type certTemplateInfo struct {
+	commonName  string
+	domains     []string
+	ipAddresses []net.IP
+	isCA        bool
+	clientAuth  bool
+	serverAuth  bool
+}
+
+func generateCertTemplate(info *certTemplateInfo) *x509.Certificate {
+	// Generate a certificate template which is valid from the past week to
+	// 10 years from now. The usage of the certificate depends on the
+	// specified fields in the given certTemplateInfo object.
+ var ( + keyUsage x509.KeyUsage + extKeyUsage []x509.ExtKeyUsage + ) + + if info.isCA { + keyUsage = x509.KeyUsageCertSign + } + + if info.clientAuth { + extKeyUsage = append(extKeyUsage, x509.ExtKeyUsageClientAuth) + } + + if info.serverAuth { + extKeyUsage = append(extKeyUsage, x509.ExtKeyUsageServerAuth) + } + + return &x509.Certificate{ + SerialNumber: big.NewInt(0), + Subject: pkix.Name{ + CommonName: info.commonName, + }, + NotBefore: time.Now().Add(-time.Hour * 24 * 7), + NotAfter: time.Now().Add(time.Hour * 24 * 365 * 10), + DNSNames: info.domains, + IPAddresses: info.ipAddresses, + IsCA: info.isCA, + KeyUsage: keyUsage, + ExtKeyUsage: extKeyUsage, + BasicConstraintsValid: info.isCA, + } +} + +func generateCert(pub PublicKey, priv PrivateKey, subInfo, issInfo *certTemplateInfo) (cert *x509.Certificate, err error) { + pubCertTemplate := generateCertTemplate(subInfo) + privCertTemplate := generateCertTemplate(issInfo) + + certDER, err := x509.CreateCertificate( + rand.Reader, pubCertTemplate, privCertTemplate, + pub.CryptoPublicKey(), priv.CryptoPrivateKey(), + ) + if err != nil { + return nil, fmt.Errorf("failed to create certificate: %s", err) + } + + cert, err = x509.ParseCertificate(certDER) + if err != nil { + return nil, fmt.Errorf("failed to parse certificate: %s", err) + } + + return +} + +// GenerateSelfSignedServerCert creates a self-signed certificate for the +// given key which is to be used for TLS servers with the given domains and +// IP addresses. +func GenerateSelfSignedServerCert(key PrivateKey, domains []string, ipAddresses []net.IP) (*x509.Certificate, error) { + info := &certTemplateInfo{ + commonName: key.KeyID(), + domains: domains, + ipAddresses: ipAddresses, + serverAuth: true, + } + + return generateCert(key.PublicKey(), key, info, info) +} + +// GenerateSelfSignedClientCert creates a self-signed certificate for the +// given key which is to be used for TLS clients. +func GenerateSelfSignedClientCert(key PrivateKey) (*x509.Certificate, error) { + info := &certTemplateInfo{ + commonName: key.KeyID(), + clientAuth: true, + } + + return generateCert(key.PublicKey(), key, info, info) +} + +// GenerateCACert creates a certificate which can be used as a trusted +// certificate authority. +func GenerateCACert(signer PrivateKey, trustedKey PublicKey) (*x509.Certificate, error) { + subjectInfo := &certTemplateInfo{ + commonName: trustedKey.KeyID(), + isCA: true, + } + issuerInfo := &certTemplateInfo{ + commonName: signer.KeyID(), + } + + return generateCert(trustedKey, signer, subjectInfo, issuerInfo) +} + +// GenerateCACertPool creates a certificate authority pool to be used for a +// TLS configuration. Any self-signed certificates issued by the specified +// trusted keys will be verified during a TLS handshake +func GenerateCACertPool(signer PrivateKey, trustedKeys []PublicKey) (*x509.CertPool, error) { + certPool := x509.NewCertPool() + + for _, trustedKey := range trustedKeys { + cert, err := GenerateCACert(signer, trustedKey) + if err != nil { + return nil, fmt.Errorf("failed to generate CA certificate: %s", err) + } + + certPool.AddCert(cert) + } + + return certPool, nil +} + +// LoadCertificateBundle loads certificates from the given file. The file should be pem encoded +// containing one or more certificates. The expected pem type is "CERTIFICATE". 
+func LoadCertificateBundle(filename string) ([]*x509.Certificate, error) { + b, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + certificates := []*x509.Certificate{} + var block *pem.Block + block, b = pem.Decode(b) + for ; block != nil; block, b = pem.Decode(b) { + if block.Type == "CERTIFICATE" { + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, err + } + certificates = append(certificates, cert) + } else { + return nil, fmt.Errorf("invalid pem block type: %s", block.Type) + } + } + + return certificates, nil +} + +// LoadCertificatePool loads a CA pool from the given file. The file should be pem encoded +// containing one or more certificates. The expected pem type is "CERTIFICATE". +func LoadCertificatePool(filename string) (*x509.CertPool, error) { + certs, err := LoadCertificateBundle(filename) + if err != nil { + return nil, err + } + pool := x509.NewCertPool() + for _, cert := range certs { + pool.AddCert(cert) + } + return pool, nil +} diff --git a/vendor/github.com/docker/libtrust/doc.go b/vendor/github.com/docker/libtrust/doc.go new file mode 100644 index 0000000..ec5d215 --- /dev/null +++ b/vendor/github.com/docker/libtrust/doc.go @@ -0,0 +1,9 @@ +/* +Package libtrust provides an interface for managing authentication and +authorization using public key cryptography. Authentication is handled +using the identity attached to the public key and verified through TLS +x509 certificates, a key challenge, or signature. Authorization and +access control is managed through a trust graph distributed between +both remote trust servers and locally cached and managed data. +*/ +package libtrust diff --git a/vendor/github.com/docker/libtrust/ec_key.go b/vendor/github.com/docker/libtrust/ec_key.go new file mode 100644 index 0000000..00bbe4b --- /dev/null +++ b/vendor/github.com/docker/libtrust/ec_key.go @@ -0,0 +1,428 @@ +package libtrust + +import ( + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/x509" + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "io" + "math/big" +) + +/* + * EC DSA PUBLIC KEY + */ + +// ecPublicKey implements a libtrust.PublicKey using elliptic curve digital +// signature algorithms. +type ecPublicKey struct { + *ecdsa.PublicKey + curveName string + signatureAlgorithm *signatureAlgorithm + extended map[string]interface{} +} + +func fromECPublicKey(cryptoPublicKey *ecdsa.PublicKey) (*ecPublicKey, error) { + curve := cryptoPublicKey.Curve + + switch { + case curve == elliptic.P256(): + return &ecPublicKey{cryptoPublicKey, "P-256", es256, map[string]interface{}{}}, nil + case curve == elliptic.P384(): + return &ecPublicKey{cryptoPublicKey, "P-384", es384, map[string]interface{}{}}, nil + case curve == elliptic.P521(): + return &ecPublicKey{cryptoPublicKey, "P-521", es512, map[string]interface{}{}}, nil + default: + return nil, errors.New("unsupported elliptic curve") + } +} + +// KeyType returns the key type for elliptic curve keys, i.e., "EC". +func (k *ecPublicKey) KeyType() string { + return "EC" +} + +// CurveName returns the elliptic curve identifier. +// Possible values are "P-256", "P-384", and "P-521". +func (k *ecPublicKey) CurveName() string { + return k.curveName +} + +// KeyID returns a distinct identifier which is unique to this Public Key. 
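The certificate helpers above compose into a small bootstrap flow. A minimal sketch, assuming GenerateECP256PrivateKey from ec_key.go later in this diff; the host name and address are placeholders.

package main

import (
	"fmt"
	"log"
	"net"

	"github.com/docker/libtrust"
)

func main() {
	// Generate an EC P-256 key to identify this server.
	key, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		log.Fatal(err)
	}

	// Self-sign a server certificate for a placeholder host and address.
	cert, err := libtrust.GenerateSelfSignedServerCert(
		key, []string{"server.example.local"}, []net.IP{net.ParseIP("127.0.0.1")},
	)
	if err != nil {
		log.Fatal(err)
	}

	// The subject common name is the key ID, per certTemplateInfo above.
	fmt.Println(cert.Subject.CommonName)
}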
+func (k *ecPublicKey) KeyID() string { + return keyIDFromCryptoKey(k) +} + +func (k *ecPublicKey) String() string { + return fmt.Sprintf("EC Public Key <%s>", k.KeyID()) +} + +// Verify verifyies the signature of the data in the io.Reader using this +// PublicKey. The alg parameter should identify the digital signature +// algorithm which was used to produce the signature and should be supported +// by this public key. Returns a nil error if the signature is valid. +func (k *ecPublicKey) Verify(data io.Reader, alg string, signature []byte) error { + // For EC keys there is only one supported signature algorithm depending + // on the curve parameters. + if k.signatureAlgorithm.HeaderParam() != alg { + return fmt.Errorf("unable to verify signature: EC Public Key with curve %q does not support signature algorithm %q", k.curveName, alg) + } + + // signature is the concatenation of (r, s), base64Url encoded. + sigLength := len(signature) + expectedOctetLength := 2 * ((k.Params().BitSize + 7) >> 3) + if sigLength != expectedOctetLength { + return fmt.Errorf("signature length is %d octets long, should be %d", sigLength, expectedOctetLength) + } + + rBytes, sBytes := signature[:sigLength/2], signature[sigLength/2:] + r := new(big.Int).SetBytes(rBytes) + s := new(big.Int).SetBytes(sBytes) + + hasher := k.signatureAlgorithm.HashID().New() + _, err := io.Copy(hasher, data) + if err != nil { + return fmt.Errorf("error reading data to sign: %s", err) + } + hash := hasher.Sum(nil) + + if !ecdsa.Verify(k.PublicKey, hash, r, s) { + return errors.New("invalid signature") + } + + return nil +} + +// CryptoPublicKey returns the internal object which can be used as a +// crypto.PublicKey for use with other standard library operations. The type +// is either *rsa.PublicKey or *ecdsa.PublicKey +func (k *ecPublicKey) CryptoPublicKey() crypto.PublicKey { + return k.PublicKey +} + +func (k *ecPublicKey) toMap() map[string]interface{} { + jwk := make(map[string]interface{}) + for k, v := range k.extended { + jwk[k] = v + } + jwk["kty"] = k.KeyType() + jwk["kid"] = k.KeyID() + jwk["crv"] = k.CurveName() + + xBytes := k.X.Bytes() + yBytes := k.Y.Bytes() + octetLength := (k.Params().BitSize + 7) >> 3 + // MUST include leading zeros in the output so that x, y are each + // *octetLength* bytes long. + xBuf := make([]byte, octetLength-len(xBytes), octetLength) + yBuf := make([]byte, octetLength-len(yBytes), octetLength) + xBuf = append(xBuf, xBytes...) + yBuf = append(yBuf, yBytes...) + + jwk["x"] = joseBase64UrlEncode(xBuf) + jwk["y"] = joseBase64UrlEncode(yBuf) + + return jwk +} + +// MarshalJSON serializes this Public Key using the JWK JSON serialization format for +// elliptic curve keys. +func (k *ecPublicKey) MarshalJSON() (data []byte, err error) { + return json.Marshal(k.toMap()) +} + +// PEMBlock serializes this Public Key to DER-encoded PKIX format. +func (k *ecPublicKey) PEMBlock() (*pem.Block, error) { + derBytes, err := x509.MarshalPKIXPublicKey(k.PublicKey) + if err != nil { + return nil, fmt.Errorf("unable to serialize EC PublicKey to DER-encoded PKIX format: %s", err) + } + k.extended["kid"] = k.KeyID() // For display purposes. 
+ return createPemBlock("PUBLIC KEY", derBytes, k.extended) +} + +func (k *ecPublicKey) AddExtendedField(field string, value interface{}) { + k.extended[field] = value +} + +func (k *ecPublicKey) GetExtendedField(field string) interface{} { + v, ok := k.extended[field] + if !ok { + return nil + } + return v +} + +func ecPublicKeyFromMap(jwk map[string]interface{}) (*ecPublicKey, error) { + // JWK key type (kty) has already been determined to be "EC". + // Need to extract 'crv', 'x', 'y', and 'kid' and check for + // consistency. + + // Get the curve identifier value. + crv, err := stringFromMap(jwk, "crv") + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key curve identifier: %s", err) + } + + var ( + curve elliptic.Curve + sigAlg *signatureAlgorithm + ) + + switch { + case crv == "P-256": + curve = elliptic.P256() + sigAlg = es256 + case crv == "P-384": + curve = elliptic.P384() + sigAlg = es384 + case crv == "P-521": + curve = elliptic.P521() + sigAlg = es512 + default: + return nil, fmt.Errorf("JWK EC Public Key curve identifier not supported: %q\n", crv) + } + + // Get the X and Y coordinates for the public key point. + xB64Url, err := stringFromMap(jwk, "x") + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key x-coordinate: %s", err) + } + x, err := parseECCoordinate(xB64Url, curve) + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key x-coordinate: %s", err) + } + + yB64Url, err := stringFromMap(jwk, "y") + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key y-coordinate: %s", err) + } + y, err := parseECCoordinate(yB64Url, curve) + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key y-coordinate: %s", err) + } + + key := &ecPublicKey{ + PublicKey: &ecdsa.PublicKey{Curve: curve, X: x, Y: y}, + curveName: crv, signatureAlgorithm: sigAlg, + } + + // Key ID is optional too, but if it exists, it should match the key. + _, ok := jwk["kid"] + if ok { + kid, err := stringFromMap(jwk, "kid") + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key ID: %s", err) + } + if kid != key.KeyID() { + return nil, fmt.Errorf("JWK EC Public Key ID does not match: %s", kid) + } + } + + key.extended = jwk + + return key, nil +} + +/* + * EC DSA PRIVATE KEY + */ + +// ecPrivateKey implements a JWK Private Key using elliptic curve digital signature +// algorithms. +type ecPrivateKey struct { + ecPublicKey + *ecdsa.PrivateKey +} + +func fromECPrivateKey(cryptoPrivateKey *ecdsa.PrivateKey) (*ecPrivateKey, error) { + publicKey, err := fromECPublicKey(&cryptoPrivateKey.PublicKey) + if err != nil { + return nil, err + } + + return &ecPrivateKey{*publicKey, cryptoPrivateKey}, nil +} + +// PublicKey returns the Public Key data associated with this Private Key. +func (k *ecPrivateKey) PublicKey() PublicKey { + return &k.ecPublicKey +} + +func (k *ecPrivateKey) String() string { + return fmt.Sprintf("EC Private Key <%s>", k.KeyID()) +} + +// Sign signs the data read from the io.Reader using a signature algorithm supported +// by the elliptic curve private key. If the specified hashing algorithm is +// supported by this key, that hash function is used to generate the signature +// otherwise the the default hashing algorithm for this key is used. Returns +// the signature and the name of the JWK signature algorithm used, e.g., +// "ES256", "ES384", "ES512". +func (k *ecPrivateKey) Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) { + // Generate a signature of the data using the internal alg. 
+	// The given hashId is only a suggestion, and since EC keys only support
+	// one signature/hash algorithm given the curve name, we disregard it for
+	// the elliptic curve JWK signature implementation.
+	hasher := k.signatureAlgorithm.HashID().New()
+	_, err = io.Copy(hasher, data)
+	if err != nil {
+		return nil, "", fmt.Errorf("error reading data to sign: %s", err)
+	}
+	hash := hasher.Sum(nil)
+
+	r, s, err := ecdsa.Sign(rand.Reader, k.PrivateKey, hash)
+	if err != nil {
+		return nil, "", fmt.Errorf("error producing signature: %s", err)
+	}
+	rBytes, sBytes := r.Bytes(), s.Bytes()
+	octetLength := (k.ecPublicKey.Params().BitSize + 7) >> 3
+	// MUST include leading zeros in the output
+	rBuf := make([]byte, octetLength-len(rBytes), octetLength)
+	sBuf := make([]byte, octetLength-len(sBytes), octetLength)
+
+	rBuf = append(rBuf, rBytes...)
+	sBuf = append(sBuf, sBytes...)
+
+	signature = append(rBuf, sBuf...)
+	alg = k.signatureAlgorithm.HeaderParam()
+
+	return
+}
+
+// CryptoPrivateKey returns the internal object which can be used as a
+// crypto.PrivateKey for use with other standard library operations. The
+// type is either *rsa.PrivateKey or *ecdsa.PrivateKey
+func (k *ecPrivateKey) CryptoPrivateKey() crypto.PrivateKey {
+	return k.PrivateKey
+}
+
+func (k *ecPrivateKey) toMap() map[string]interface{} {
+	jwk := k.ecPublicKey.toMap()
+
+	dBytes := k.D.Bytes()
+	// The length of this octet string MUST be ceiling(log-base-2(n)/8)
+	// octets (where n is the order of the curve). This is because the private
+	// key d must be in the interval [1, n-1] so the bitlength of d should be
+	// no larger than the bitlength of n-1. The easiest way to find the octet
+	// length is to take bitlength(n-1), add 7 to force a carry, and shift this
+	// bit sequence right by 3, which is essentially dividing by 8 and adding
+	// 1 if there is any remainder. Thus, the private key value d should be
+	// output to (bitlength(n-1)+7)>>3 octets.
+	n := k.ecPublicKey.Params().N
+	octetLength := (new(big.Int).Sub(n, big.NewInt(1)).BitLen() + 7) >> 3
+	// Create a buffer with the necessary zero-padding.
+	dBuf := make([]byte, octetLength-len(dBytes), octetLength)
+	dBuf = append(dBuf, dBytes...)
+
+	jwk["d"] = joseBase64UrlEncode(dBuf)
+
+	return jwk
+}
+
+// MarshalJSON serializes this Private Key using the JWK JSON serialization format for
+// elliptic curve keys.
+func (k *ecPrivateKey) MarshalJSON() (data []byte, err error) {
+	return json.Marshal(k.toMap())
+}
+
+// PEMBlock serializes this Private Key to DER-encoded PKIX format.
+func (k *ecPrivateKey) PEMBlock() (*pem.Block, error) {
+	derBytes, err := x509.MarshalECPrivateKey(k.PrivateKey)
+	if err != nil {
+		return nil, fmt.Errorf("unable to serialize EC PrivateKey to DER-encoded PKIX format: %s", err)
+	}
+	k.extended["keyID"] = k.KeyID() // For display purposes.
+	return createPemBlock("EC PRIVATE KEY", derBytes, k.extended)
+}
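Sign above pairs with the Verify method earlier in ec_key.go. A minimal round trip; as the comment in Sign notes, the suggested hash is disregarded for EC keys, so a P-384 key always reports ES384.

package main

import (
	"bytes"
	"crypto"
	"fmt"
	"log"

	"github.com/docker/libtrust"
)

func main() {
	key, err := libtrust.GenerateECP384PrivateKey()
	if err != nil {
		log.Fatal(err)
	}

	msg := []byte("manifest payload")

	// The SHA-256 suggestion is ignored; the curve fixes the algorithm.
	sig, alg, err := key.Sign(bytes.NewReader(msg), crypto.SHA256)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("algorithm:", alg) // ES384

	// Verify with the matching public key and the reported algorithm.
	if err := key.PublicKey().Verify(bytes.NewReader(msg), alg, sig); err != nil {
		log.Fatal(err)
	}
	fmt.Println("signature verified")
}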
+
+func ecPrivateKeyFromMap(jwk map[string]interface{}) (*ecPrivateKey, error) {
+	dB64Url, err := stringFromMap(jwk, "d")
+	if err != nil {
+		return nil, fmt.Errorf("JWK EC Private Key: %s", err)
+	}
+
+	// JWK key type (kty) has already been determined to be "EC".
+	// Need to extract the public key information, then extract the private
+	// key value 'd'.
+	publicKey, err := ecPublicKeyFromMap(jwk)
+	if err != nil {
+		return nil, err
+	}
+
+	d, err := parseECPrivateParam(dB64Url, publicKey.Curve)
+	if err != nil {
+		return nil, fmt.Errorf("JWK EC Private Key d-param: %s", err)
+	}
+
+	key := &ecPrivateKey{
+		ecPublicKey: *publicKey,
+		PrivateKey: &ecdsa.PrivateKey{
+			PublicKey: *publicKey.PublicKey,
+			D:         d,
+		},
+	}
+
+	return key, nil
+}
+
+/*
+ *	Key Generation Functions.
+ */
+
+func generateECPrivateKey(curve elliptic.Curve) (k *ecPrivateKey, err error) {
+	k = new(ecPrivateKey)
+	k.PrivateKey, err = ecdsa.GenerateKey(curve, rand.Reader)
+	if err != nil {
+		return nil, err
+	}
+
+	k.ecPublicKey.PublicKey = &k.PrivateKey.PublicKey
+	k.extended = make(map[string]interface{})
+
+	return
+}
+
+// GenerateECP256PrivateKey generates a key pair using elliptic curve P-256.
+func GenerateECP256PrivateKey() (PrivateKey, error) {
+	k, err := generateECPrivateKey(elliptic.P256())
+	if err != nil {
+		return nil, fmt.Errorf("error generating EC P-256 key: %s", err)
+	}
+
+	k.curveName = "P-256"
+	k.signatureAlgorithm = es256
+
+	return k, nil
+}
+
+// GenerateECP384PrivateKey generates a key pair using elliptic curve P-384.
+func GenerateECP384PrivateKey() (PrivateKey, error) {
+	k, err := generateECPrivateKey(elliptic.P384())
+	if err != nil {
+		return nil, fmt.Errorf("error generating EC P-384 key: %s", err)
+	}
+
+	k.curveName = "P-384"
+	k.signatureAlgorithm = es384
+
+	return k, nil
+}
+
+// GenerateECP521PrivateKey generates a key pair using elliptic curve P-521.
+func GenerateECP521PrivateKey() (PrivateKey, error) {
+	k, err := generateECPrivateKey(elliptic.P521())
+	if err != nil {
+		return nil, fmt.Errorf("error generating EC P-521 key: %s", err)
+	}
+
+	k.curveName = "P-521"
+	k.signatureAlgorithm = es512
+
+	return k, nil
+}
diff --git a/vendor/github.com/docker/libtrust/filter.go b/vendor/github.com/docker/libtrust/filter.go
new file mode 100644
index 0000000..5b2b4fc
--- /dev/null
+++ b/vendor/github.com/docker/libtrust/filter.go
@@ -0,0 +1,50 @@
+package libtrust
+
+import (
+	"path/filepath"
+)
+
+// FilterByHosts filters the list of PublicKeys to only those which contain a
+// 'hosts' pattern which matches the given host. If *includeEmpty* is true,
+// then keys which do not specify any hosts are also returned.
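A short usage sketch for FilterByHosts as just documented; the 'hosts' pattern and host name are illustrative.

package main

import (
	"fmt"
	"log"

	"github.com/docker/libtrust"
)

func main() {
	k1, _ := libtrust.GenerateECP256PrivateKey()
	k2, _ := libtrust.GenerateECP256PrivateKey()

	// Tag the first key with a glob pattern; leave the second untagged.
	pub1, pub2 := k1.PublicKey(), k2.PublicKey()
	pub1.AddExtendedField("hosts", []string{"*.example.com"})

	// Only pub1 matches; includeEmpty=false drops the untagged pub2.
	matched, err := libtrust.FilterByHosts(
		[]libtrust.PublicKey{pub1, pub2}, "registry.example.com", false,
	)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(matched)) // 1
}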
+func FilterByHosts(keys []PublicKey, host string, includeEmpty bool) ([]PublicKey, error) { + filtered := make([]PublicKey, 0, len(keys)) + + for _, pubKey := range keys { + var hosts []string + switch v := pubKey.GetExtendedField("hosts").(type) { + case []string: + hosts = v + case []interface{}: + for _, value := range v { + h, ok := value.(string) + if !ok { + continue + } + hosts = append(hosts, h) + } + } + + if len(hosts) == 0 { + if includeEmpty { + filtered = append(filtered, pubKey) + } + continue + } + + // Check if any hosts match pattern + for _, hostPattern := range hosts { + match, err := filepath.Match(hostPattern, host) + if err != nil { + return nil, err + } + + if match { + filtered = append(filtered, pubKey) + continue + } + } + } + + return filtered, nil +} diff --git a/vendor/github.com/docker/libtrust/hash.go b/vendor/github.com/docker/libtrust/hash.go new file mode 100644 index 0000000..a2df787 --- /dev/null +++ b/vendor/github.com/docker/libtrust/hash.go @@ -0,0 +1,56 @@ +package libtrust + +import ( + "crypto" + _ "crypto/sha256" // Registrer SHA224 and SHA256 + _ "crypto/sha512" // Registrer SHA384 and SHA512 + "fmt" +) + +type signatureAlgorithm struct { + algHeaderParam string + hashID crypto.Hash +} + +func (h *signatureAlgorithm) HeaderParam() string { + return h.algHeaderParam +} + +func (h *signatureAlgorithm) HashID() crypto.Hash { + return h.hashID +} + +var ( + rs256 = &signatureAlgorithm{"RS256", crypto.SHA256} + rs384 = &signatureAlgorithm{"RS384", crypto.SHA384} + rs512 = &signatureAlgorithm{"RS512", crypto.SHA512} + es256 = &signatureAlgorithm{"ES256", crypto.SHA256} + es384 = &signatureAlgorithm{"ES384", crypto.SHA384} + es512 = &signatureAlgorithm{"ES512", crypto.SHA512} +) + +func rsaSignatureAlgorithmByName(alg string) (*signatureAlgorithm, error) { + switch { + case alg == "RS256": + return rs256, nil + case alg == "RS384": + return rs384, nil + case alg == "RS512": + return rs512, nil + default: + return nil, fmt.Errorf("RSA Digital Signature Algorithm %q not supported", alg) + } +} + +func rsaPKCS1v15SignatureAlgorithmForHashID(hashID crypto.Hash) *signatureAlgorithm { + switch { + case hashID == crypto.SHA512: + return rs512 + case hashID == crypto.SHA384: + return rs384 + case hashID == crypto.SHA256: + fallthrough + default: + return rs256 + } +} diff --git a/vendor/github.com/docker/libtrust/jsonsign.go b/vendor/github.com/docker/libtrust/jsonsign.go new file mode 100644 index 0000000..cb2ca9a --- /dev/null +++ b/vendor/github.com/docker/libtrust/jsonsign.go @@ -0,0 +1,657 @@ +package libtrust + +import ( + "bytes" + "crypto" + "crypto/x509" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "sort" + "time" + "unicode" +) + +var ( + // ErrInvalidSignContent is used when the content to be signed is invalid. + ErrInvalidSignContent = errors.New("invalid sign content") + + // ErrInvalidJSONContent is used when invalid json is encountered. + ErrInvalidJSONContent = errors.New("invalid json content") + + // ErrMissingSignatureKey is used when the specified signature key + // does not exist in the JSON content. 
+ ErrMissingSignatureKey = errors.New("missing signature key") +) + +type jsHeader struct { + JWK PublicKey `json:"jwk,omitempty"` + Algorithm string `json:"alg"` + Chain []string `json:"x5c,omitempty"` +} + +type jsSignature struct { + Header jsHeader `json:"header"` + Signature string `json:"signature"` + Protected string `json:"protected,omitempty"` +} + +type jsSignaturesSorted []jsSignature + +func (jsbkid jsSignaturesSorted) Swap(i, j int) { jsbkid[i], jsbkid[j] = jsbkid[j], jsbkid[i] } +func (jsbkid jsSignaturesSorted) Len() int { return len(jsbkid) } + +func (jsbkid jsSignaturesSorted) Less(i, j int) bool { + ki, kj := jsbkid[i].Header.JWK.KeyID(), jsbkid[j].Header.JWK.KeyID() + si, sj := jsbkid[i].Signature, jsbkid[j].Signature + + if ki == kj { + return si < sj + } + + return ki < kj +} + +type signKey struct { + PrivateKey + Chain []*x509.Certificate +} + +// JSONSignature represents a signature of a json object. +type JSONSignature struct { + payload string + signatures []jsSignature + indent string + formatLength int + formatTail []byte +} + +func newJSONSignature() *JSONSignature { + return &JSONSignature{ + signatures: make([]jsSignature, 0, 1), + } +} + +// Payload returns the encoded payload of the signature. This +// payload should not be signed directly +func (js *JSONSignature) Payload() ([]byte, error) { + return joseBase64UrlDecode(js.payload) +} + +func (js *JSONSignature) protectedHeader() (string, error) { + protected := map[string]interface{}{ + "formatLength": js.formatLength, + "formatTail": joseBase64UrlEncode(js.formatTail), + "time": time.Now().UTC().Format(time.RFC3339), + } + protectedBytes, err := json.Marshal(protected) + if err != nil { + return "", err + } + + return joseBase64UrlEncode(protectedBytes), nil +} + +func (js *JSONSignature) signBytes(protectedHeader string) ([]byte, error) { + buf := make([]byte, len(js.payload)+len(protectedHeader)+1) + copy(buf, protectedHeader) + buf[len(protectedHeader)] = '.' + copy(buf[len(protectedHeader)+1:], js.payload) + return buf, nil +} + +// Sign adds a signature using the given private key. +func (js *JSONSignature) Sign(key PrivateKey) error { + protected, err := js.protectedHeader() + if err != nil { + return err + } + signBytes, err := js.signBytes(protected) + if err != nil { + return err + } + sigBytes, algorithm, err := key.Sign(bytes.NewReader(signBytes), crypto.SHA256) + if err != nil { + return err + } + + js.signatures = append(js.signatures, jsSignature{ + Header: jsHeader{ + JWK: key.PublicKey(), + Algorithm: algorithm, + }, + Signature: joseBase64UrlEncode(sigBytes), + Protected: protected, + }) + + return nil +} + +// SignWithChain adds a signature using the given private key +// and setting the x509 chain. The public key of the first element +// in the chain must be the public key corresponding with the sign key. 
+func (js *JSONSignature) SignWithChain(key PrivateKey, chain []*x509.Certificate) error { + // Ensure key.Chain[0] is public key for key + //key.Chain.PublicKey + //key.PublicKey().CryptoPublicKey() + + // Verify chain + protected, err := js.protectedHeader() + if err != nil { + return err + } + signBytes, err := js.signBytes(protected) + if err != nil { + return err + } + sigBytes, algorithm, err := key.Sign(bytes.NewReader(signBytes), crypto.SHA256) + if err != nil { + return err + } + + header := jsHeader{ + Chain: make([]string, len(chain)), + Algorithm: algorithm, + } + + for i, cert := range chain { + header.Chain[i] = base64.StdEncoding.EncodeToString(cert.Raw) + } + + js.signatures = append(js.signatures, jsSignature{ + Header: header, + Signature: joseBase64UrlEncode(sigBytes), + Protected: protected, + }) + + return nil +} + +// Verify verifies all the signatures and returns the list of +// public keys used to sign. Any x509 chains are not checked. +func (js *JSONSignature) Verify() ([]PublicKey, error) { + keys := make([]PublicKey, len(js.signatures)) + for i, signature := range js.signatures { + signBytes, err := js.signBytes(signature.Protected) + if err != nil { + return nil, err + } + var publicKey PublicKey + if len(signature.Header.Chain) > 0 { + certBytes, err := base64.StdEncoding.DecodeString(signature.Header.Chain[0]) + if err != nil { + return nil, err + } + cert, err := x509.ParseCertificate(certBytes) + if err != nil { + return nil, err + } + publicKey, err = FromCryptoPublicKey(cert.PublicKey) + if err != nil { + return nil, err + } + } else if signature.Header.JWK != nil { + publicKey = signature.Header.JWK + } else { + return nil, errors.New("missing public key") + } + + sigBytes, err := joseBase64UrlDecode(signature.Signature) + if err != nil { + return nil, err + } + + err = publicKey.Verify(bytes.NewReader(signBytes), signature.Header.Algorithm, sigBytes) + if err != nil { + return nil, err + } + + keys[i] = publicKey + } + return keys, nil +} + +// VerifyChains verifies all the signatures and the chains associated +// with each signature and returns the list of verified chains. +// Signatures without an x509 chain are not checked. +func (js *JSONSignature) VerifyChains(ca *x509.CertPool) ([][]*x509.Certificate, error) { + chains := make([][]*x509.Certificate, 0, len(js.signatures)) + for _, signature := range js.signatures { + signBytes, err := js.signBytes(signature.Protected) + if err != nil { + return nil, err + } + var publicKey PublicKey + if len(signature.Header.Chain) > 0 { + certBytes, err := base64.StdEncoding.DecodeString(signature.Header.Chain[0]) + if err != nil { + return nil, err + } + cert, err := x509.ParseCertificate(certBytes) + if err != nil { + return nil, err + } + publicKey, err = FromCryptoPublicKey(cert.PublicKey) + if err != nil { + return nil, err + } + intermediates := x509.NewCertPool() + if len(signature.Header.Chain) > 1 { + intermediateChain := signature.Header.Chain[1:] + for i := range intermediateChain { + certBytes, err := base64.StdEncoding.DecodeString(intermediateChain[i]) + if err != nil { + return nil, err + } + intermediate, err := x509.ParseCertificate(certBytes) + if err != nil { + return nil, err + } + intermediates.AddCert(intermediate) + } + } + + verifyOptions := x509.VerifyOptions{ + Intermediates: intermediates, + Roots: ca, + } + + verifiedChains, err := cert.Verify(verifyOptions) + if err != nil { + return nil, err + } + chains = append(chains, verifiedChains...) 
+ + sigBytes, err := joseBase64UrlDecode(signature.Signature) + if err != nil { + return nil, err + } + + err = publicKey.Verify(bytes.NewReader(signBytes), signature.Header.Algorithm, sigBytes) + if err != nil { + return nil, err + } + } + + } + return chains, nil +} + +// JWS returns JSON serialized JWS according to +// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-7.2 +func (js *JSONSignature) JWS() ([]byte, error) { + if len(js.signatures) == 0 { + return nil, errors.New("missing signature") + } + + sort.Sort(jsSignaturesSorted(js.signatures)) + + jsonMap := map[string]interface{}{ + "payload": js.payload, + "signatures": js.signatures, + } + + return json.MarshalIndent(jsonMap, "", " ") +} + +func notSpace(r rune) bool { + return !unicode.IsSpace(r) +} + +func detectJSONIndent(jsonContent []byte) (indent string) { + if len(jsonContent) > 2 && jsonContent[0] == '{' && jsonContent[1] == '\n' { + quoteIndex := bytes.IndexRune(jsonContent[1:], '"') + if quoteIndex > 0 { + indent = string(jsonContent[2 : quoteIndex+1]) + } + } + return +} + +type jsParsedHeader struct { + JWK json.RawMessage `json:"jwk"` + Algorithm string `json:"alg"` + Chain []string `json:"x5c"` +} + +type jsParsedSignature struct { + Header jsParsedHeader `json:"header"` + Signature string `json:"signature"` + Protected string `json:"protected"` +} + +// ParseJWS parses a JWS serialized JSON object into a Json Signature. +func ParseJWS(content []byte) (*JSONSignature, error) { + type jsParsed struct { + Payload string `json:"payload"` + Signatures []jsParsedSignature `json:"signatures"` + } + parsed := &jsParsed{} + err := json.Unmarshal(content, parsed) + if err != nil { + return nil, err + } + if len(parsed.Signatures) == 0 { + return nil, errors.New("missing signatures") + } + payload, err := joseBase64UrlDecode(parsed.Payload) + if err != nil { + return nil, err + } + + js, err := NewJSONSignature(payload) + if err != nil { + return nil, err + } + js.signatures = make([]jsSignature, len(parsed.Signatures)) + for i, signature := range parsed.Signatures { + header := jsHeader{ + Algorithm: signature.Header.Algorithm, + } + if signature.Header.Chain != nil { + header.Chain = signature.Header.Chain + } + if signature.Header.JWK != nil { + publicKey, err := UnmarshalPublicKeyJWK([]byte(signature.Header.JWK)) + if err != nil { + return nil, err + } + header.JWK = publicKey + } + js.signatures[i] = jsSignature{ + Header: header, + Signature: signature.Signature, + Protected: signature.Protected, + } + } + + return js, nil +} + +// NewJSONSignature returns a new unsigned JWS from a json byte array. +// JSONSignature will need to be signed before serializing or storing. +// Optionally, one or more signatures can be provided as byte buffers, +// containing serialized JWS signatures, to assemble a fully signed JWS +// package. It is the callers responsibility to ensure uniqueness of the +// provided signatures. 
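The JWS helpers above form a complete sign, serialize, and verify loop. A minimal sketch using NewJSONSignature (defined next) together with Sign, JWS, ParseJWS, and Verify from this file:

package main

import (
	"fmt"
	"log"

	"github.com/docker/libtrust"
)

func main() {
	key, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		log.Fatal(err)
	}

	// Wrap a JSON document, sign it, and serialize as JWS JSON.
	js, err := libtrust.NewJSONSignature([]byte(`{"name": "demo"}`))
	if err != nil {
		log.Fatal(err)
	}
	if err := js.Sign(key); err != nil {
		log.Fatal(err)
	}
	jws, err := js.JWS()
	if err != nil {
		log.Fatal(err)
	}

	// Parse the serialized form back and recover the signing keys.
	parsed, err := libtrust.ParseJWS(jws)
	if err != nil {
		log.Fatal(err)
	}
	keys, err := parsed.Verify()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("signed by", keys[0].KeyID())
}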
+func NewJSONSignature(content []byte, signatures ...[]byte) (*JSONSignature, error) { + var dataMap map[string]interface{} + err := json.Unmarshal(content, &dataMap) + if err != nil { + return nil, err + } + + js := newJSONSignature() + js.indent = detectJSONIndent(content) + + js.payload = joseBase64UrlEncode(content) + + // Find trailing } and whitespace, put in protected header + closeIndex := bytes.LastIndexFunc(content, notSpace) + if content[closeIndex] != '}' { + return nil, ErrInvalidJSONContent + } + lastRuneIndex := bytes.LastIndexFunc(content[:closeIndex], notSpace) + if content[lastRuneIndex] == ',' { + return nil, ErrInvalidJSONContent + } + js.formatLength = lastRuneIndex + 1 + js.formatTail = content[js.formatLength:] + + if len(signatures) > 0 { + for _, signature := range signatures { + var parsedJSig jsParsedSignature + + if err := json.Unmarshal(signature, &parsedJSig); err != nil { + return nil, err + } + + // TODO(stevvooe): A lot of the code below is repeated in + // ParseJWS. It will require more refactoring to fix that. + jsig := jsSignature{ + Header: jsHeader{ + Algorithm: parsedJSig.Header.Algorithm, + }, + Signature: parsedJSig.Signature, + Protected: parsedJSig.Protected, + } + + if parsedJSig.Header.Chain != nil { + jsig.Header.Chain = parsedJSig.Header.Chain + } + + if parsedJSig.Header.JWK != nil { + publicKey, err := UnmarshalPublicKeyJWK([]byte(parsedJSig.Header.JWK)) + if err != nil { + return nil, err + } + jsig.Header.JWK = publicKey + } + + js.signatures = append(js.signatures, jsig) + } + } + + return js, nil +} + +// NewJSONSignatureFromMap returns a new unsigned JSONSignature from a map or +// struct. JWS will need to be signed before serializing or storing. +func NewJSONSignatureFromMap(content interface{}) (*JSONSignature, error) { + switch content.(type) { + case map[string]interface{}: + case struct{}: + default: + return nil, errors.New("invalid data type") + } + + js := newJSONSignature() + js.indent = " " + + payload, err := json.MarshalIndent(content, "", js.indent) + if err != nil { + return nil, err + } + js.payload = joseBase64UrlEncode(payload) + + // Remove '\n}' from formatted section, put in protected header + js.formatLength = len(payload) - 2 + js.formatTail = payload[js.formatLength:] + + return js, nil +} + +func readIntFromMap(key string, m map[string]interface{}) (int, bool) { + value, ok := m[key] + if !ok { + return 0, false + } + switch v := value.(type) { + case int: + return v, true + case float64: + return int(v), true + default: + return 0, false + } +} + +func readStringFromMap(key string, m map[string]interface{}) (v string, ok bool) { + value, ok := m[key] + if !ok { + return "", false + } + v, ok = value.(string) + return +} + +// ParsePrettySignature parses a formatted signature into a +// JSON signature. If the signatures are missing the format information +// an error is thrown. The formatted signature must be created by +// the same method as format signature. 
+func ParsePrettySignature(content []byte, signatureKey string) (*JSONSignature, error) { + var contentMap map[string]json.RawMessage + err := json.Unmarshal(content, &contentMap) + if err != nil { + return nil, fmt.Errorf("error unmarshalling content: %s", err) + } + sigMessage, ok := contentMap[signatureKey] + if !ok { + return nil, ErrMissingSignatureKey + } + + var signatureBlocks []jsParsedSignature + err = json.Unmarshal([]byte(sigMessage), &signatureBlocks) + if err != nil { + return nil, fmt.Errorf("error unmarshalling signatures: %s", err) + } + + js := newJSONSignature() + js.signatures = make([]jsSignature, len(signatureBlocks)) + + for i, signatureBlock := range signatureBlocks { + protectedBytes, err := joseBase64UrlDecode(signatureBlock.Protected) + if err != nil { + return nil, fmt.Errorf("base64 decode error: %s", err) + } + var protectedHeader map[string]interface{} + err = json.Unmarshal(protectedBytes, &protectedHeader) + if err != nil { + return nil, fmt.Errorf("error unmarshalling protected header: %s", err) + } + + formatLength, ok := readIntFromMap("formatLength", protectedHeader) + if !ok { + return nil, errors.New("missing formatted length") + } + encodedTail, ok := readStringFromMap("formatTail", protectedHeader) + if !ok { + return nil, errors.New("missing formatted tail") + } + formatTail, err := joseBase64UrlDecode(encodedTail) + if err != nil { + return nil, fmt.Errorf("base64 decode error on tail: %s", err) + } + if js.formatLength == 0 { + js.formatLength = formatLength + } else if js.formatLength != formatLength { + return nil, errors.New("conflicting format length") + } + if len(js.formatTail) == 0 { + js.formatTail = formatTail + } else if bytes.Compare(js.formatTail, formatTail) != 0 { + return nil, errors.New("conflicting format tail") + } + + header := jsHeader{ + Algorithm: signatureBlock.Header.Algorithm, + Chain: signatureBlock.Header.Chain, + } + if signatureBlock.Header.JWK != nil { + publicKey, err := UnmarshalPublicKeyJWK([]byte(signatureBlock.Header.JWK)) + if err != nil { + return nil, fmt.Errorf("error unmarshalling public key: %s", err) + } + header.JWK = publicKey + } + js.signatures[i] = jsSignature{ + Header: header, + Signature: signatureBlock.Signature, + Protected: signatureBlock.Protected, + } + } + if js.formatLength > len(content) { + return nil, errors.New("invalid format length") + } + formatted := make([]byte, js.formatLength+len(js.formatTail)) + copy(formatted, content[:js.formatLength]) + copy(formatted[js.formatLength:], js.formatTail) + js.indent = detectJSONIndent(formatted) + js.payload = joseBase64UrlEncode(formatted) + + return js, nil +} + +// PrettySignature formats a json signature into an easy to read +// single json serialized object. 
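ParsePrettySignature above and PrettySignature (defined next) are inverses around the embedded-signature layout. A minimal sketch; the "signatures" key is the caller's choice.

package main

import (
	"fmt"
	"log"

	"github.com/docker/libtrust"
)

func main() {
	key, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		log.Fatal(err)
	}

	// Build a signature over a map and embed it under a "signatures" key.
	js, err := libtrust.NewJSONSignatureFromMap(map[string]interface{}{"name": "demo"})
	if err != nil {
		log.Fatal(err)
	}
	if err := js.Sign(key); err != nil {
		log.Fatal(err)
	}
	pretty, err := js.PrettySignature("signatures")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(pretty))

	// The embedded form round-trips through ParsePrettySignature above.
	if _, err := libtrust.ParsePrettySignature(pretty, "signatures"); err != nil {
		log.Fatal(err)
	}
}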
+func (js *JSONSignature) PrettySignature(signatureKey string) ([]byte, error) { + if len(js.signatures) == 0 { + return nil, errors.New("no signatures") + } + payload, err := joseBase64UrlDecode(js.payload) + if err != nil { + return nil, err + } + payload = payload[:js.formatLength] + + sort.Sort(jsSignaturesSorted(js.signatures)) + + var marshalled []byte + var marshallErr error + if js.indent != "" { + marshalled, marshallErr = json.MarshalIndent(js.signatures, js.indent, js.indent) + } else { + marshalled, marshallErr = json.Marshal(js.signatures) + } + if marshallErr != nil { + return nil, marshallErr + } + + buf := bytes.NewBuffer(make([]byte, 0, len(payload)+len(marshalled)+34)) + buf.Write(payload) + buf.WriteByte(',') + if js.indent != "" { + buf.WriteByte('\n') + buf.WriteString(js.indent) + buf.WriteByte('"') + buf.WriteString(signatureKey) + buf.WriteString("\": ") + buf.Write(marshalled) + buf.WriteByte('\n') + } else { + buf.WriteByte('"') + buf.WriteString(signatureKey) + buf.WriteString("\":") + buf.Write(marshalled) + } + buf.WriteByte('}') + + return buf.Bytes(), nil +} + +// Signatures provides the signatures on this JWS as opaque blobs, sorted by +// keyID. These blobs can be stored and reassembled with payloads. Internally, +// they are simply marshaled json web signatures but implementations should +// not rely on this. +func (js *JSONSignature) Signatures() ([][]byte, error) { + sort.Sort(jsSignaturesSorted(js.signatures)) + + var sb [][]byte + for _, jsig := range js.signatures { + p, err := json.Marshal(jsig) + if err != nil { + return nil, err + } + + sb = append(sb, p) + } + + return sb, nil +} + +// Merge combines the signatures from one or more other signatures into the +// method receiver. If the payloads differ for any argument, an error will be +// returned and the receiver will not be modified. +func (js *JSONSignature) Merge(others ...*JSONSignature) error { + merged := js.signatures + for _, other := range others { + if js.payload != other.payload { + return fmt.Errorf("payloads differ from merge target") + } + merged = append(merged, other.signatures...) + } + + js.signatures = merged + return nil +} diff --git a/vendor/github.com/docker/libtrust/key.go b/vendor/github.com/docker/libtrust/key.go new file mode 100644 index 0000000..73642db --- /dev/null +++ b/vendor/github.com/docker/libtrust/key.go @@ -0,0 +1,253 @@ +package libtrust + +import ( + "crypto" + "crypto/ecdsa" + "crypto/rsa" + "crypto/x509" + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "io" +) + +// PublicKey is a generic interface for a Public Key. +type PublicKey interface { + // KeyType returns the key type for this key. For elliptic curve keys, + // this value should be "EC". For RSA keys, this value should be "RSA". + KeyType() string + // KeyID returns a distinct identifier which is unique to this Public Key. + // The format generated by this library is a base32 encoding of a 240 bit + // hash of the public key data divided into 12 groups like so: + // ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP + KeyID() string + // Verify verifyies the signature of the data in the io.Reader using this + // Public Key. The alg parameter should identify the digital signature + // algorithm which was used to produce the signature and should be + // supported by this public key. Returns a nil error if the signature + // is valid. 
+	Verify(data io.Reader, alg string, signature []byte) error
+	// CryptoPublicKey returns the internal object which can be used as a
+	// crypto.PublicKey for use with other standard library operations. The type
+	// is either *rsa.PublicKey or *ecdsa.PublicKey
+	CryptoPublicKey() crypto.PublicKey
+	// These public keys can be serialized to the standard JSON encoding for
+	// JSON Web Keys. See section 6 of the IETF draft RFC for JOSE JSON Web
+	// Algorithms.
+	MarshalJSON() ([]byte, error)
+	// These keys can also be serialized to the standard PEM encoding.
+	PEMBlock() (*pem.Block, error)
+	// The string representation of a key is its key type and ID.
+	String() string
+	AddExtendedField(string, interface{})
+	GetExtendedField(string) interface{}
+}
+
+// PrivateKey is a generic interface for a Private Key.
+type PrivateKey interface {
+	// A PrivateKey contains all fields and methods of a PublicKey of the
+	// same type. The MarshalJSON method also outputs the private key as a
+	// JSON Web Key, and the PEMBlock method outputs the private key as a
+	// PEM block.
+	PublicKey
+	// PublicKey returns the PublicKey associated with this PrivateKey.
+	PublicKey() PublicKey
+	// Sign signs the data read from the io.Reader using a signature algorithm
+	// supported by the private key. If the specified hashing algorithm is
+	// supported by this key, that hash function is used to generate the
+	// signature otherwise the default hashing algorithm for this key is
+	// used. Returns the signature and identifier of the algorithm used.
+	Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error)
+	// CryptoPrivateKey returns the internal object which can be used as a
+	// crypto.PrivateKey for use with other standard library operations. The
+	// type is either *rsa.PrivateKey or *ecdsa.PrivateKey
+	CryptoPrivateKey() crypto.PrivateKey
+}
+
+// FromCryptoPublicKey returns a libtrust PublicKey representation of the given
+// *ecdsa.PublicKey or *rsa.PublicKey. Returns a non-nil error when the given
+// key is of an unsupported type.
+func FromCryptoPublicKey(cryptoPublicKey crypto.PublicKey) (PublicKey, error) {
+	switch cryptoPublicKey := cryptoPublicKey.(type) {
+	case *ecdsa.PublicKey:
+		return fromECPublicKey(cryptoPublicKey)
+	case *rsa.PublicKey:
+		return fromRSAPublicKey(cryptoPublicKey), nil
+	default:
+		return nil, fmt.Errorf("public key type %T is not supported", cryptoPublicKey)
+	}
+}
+
+// FromCryptoPrivateKey returns a libtrust PrivateKey representation of the given
+// *ecdsa.PrivateKey or *rsa.PrivateKey. Returns a non-nil error when the given
+// key is of an unsupported type.
+func FromCryptoPrivateKey(cryptoPrivateKey crypto.PrivateKey) (PrivateKey, error) {
+	switch cryptoPrivateKey := cryptoPrivateKey.(type) {
+	case *ecdsa.PrivateKey:
+		return fromECPrivateKey(cryptoPrivateKey)
+	case *rsa.PrivateKey:
+		return fromRSAPrivateKey(cryptoPrivateKey), nil
+	default:
+		return nil, fmt.Errorf("private key type %T is not supported", cryptoPrivateKey)
+	}
+}
+
+// UnmarshalPublicKeyPEM parses the PEM encoded data and returns a libtrust
+// PublicKey or an error if there is a problem with the encoding.
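The PEM helpers below reverse PEMBlock from the PublicKey interface above. A minimal round-trip sketch:

package main

import (
	"encoding/pem"
	"fmt"
	"log"

	"github.com/docker/libtrust"
)

func main() {
	key, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		log.Fatal(err)
	}
	pub := key.PublicKey()

	// Serialize the public key as a "PUBLIC KEY" PEM block...
	block, err := pub.PEMBlock()
	if err != nil {
		log.Fatal(err)
	}
	pemBytes := pem.EncodeToMemory(block)

	// ...and parse it back with the helper defined just below.
	restored, err := libtrust.UnmarshalPublicKeyPEM(pemBytes)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(restored.KeyID() == pub.KeyID()) // true
}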
+func UnmarshalPublicKeyPEM(data []byte) (PublicKey, error) { + pemBlock, _ := pem.Decode(data) + if pemBlock == nil { + return nil, errors.New("unable to find PEM encoded data") + } else if pemBlock.Type != "PUBLIC KEY" { + return nil, fmt.Errorf("unable to get PublicKey from PEM type: %s", pemBlock.Type) + } + + return pubKeyFromPEMBlock(pemBlock) +} + +// UnmarshalPublicKeyPEMBundle parses the PEM encoded data as a bundle of +// PEM blocks appended one after the other and returns a slice of PublicKey +// objects that it finds. +func UnmarshalPublicKeyPEMBundle(data []byte) ([]PublicKey, error) { + pubKeys := []PublicKey{} + + for { + var pemBlock *pem.Block + pemBlock, data = pem.Decode(data) + if pemBlock == nil { + break + } else if pemBlock.Type != "PUBLIC KEY" { + return nil, fmt.Errorf("unable to get PublicKey from PEM type: %s", pemBlock.Type) + } + + pubKey, err := pubKeyFromPEMBlock(pemBlock) + if err != nil { + return nil, err + } + + pubKeys = append(pubKeys, pubKey) + } + + return pubKeys, nil +} + +// UnmarshalPrivateKeyPEM parses the PEM encoded data and returns a libtrust +// PrivateKey or an error if there is a problem with the encoding. +func UnmarshalPrivateKeyPEM(data []byte) (PrivateKey, error) { + pemBlock, _ := pem.Decode(data) + if pemBlock == nil { + return nil, errors.New("unable to find PEM encoded data") + } + + var key PrivateKey + + switch { + case pemBlock.Type == "RSA PRIVATE KEY": + rsaPrivateKey, err := x509.ParsePKCS1PrivateKey(pemBlock.Bytes) + if err != nil { + return nil, fmt.Errorf("unable to decode RSA Private Key PEM data: %s", err) + } + key = fromRSAPrivateKey(rsaPrivateKey) + case pemBlock.Type == "EC PRIVATE KEY": + ecPrivateKey, err := x509.ParseECPrivateKey(pemBlock.Bytes) + if err != nil { + return nil, fmt.Errorf("unable to decode EC Private Key PEM data: %s", err) + } + key, err = fromECPrivateKey(ecPrivateKey) + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("unable to get PrivateKey from PEM type: %s", pemBlock.Type) + } + + addPEMHeadersToKey(pemBlock, key.PublicKey()) + + return key, nil +} + +// UnmarshalPublicKeyJWK unmarshals the given JSON Web Key into a generic +// Public Key to be used with libtrust. +func UnmarshalPublicKeyJWK(data []byte) (PublicKey, error) { + jwk := make(map[string]interface{}) + + err := json.Unmarshal(data, &jwk) + if err != nil { + return nil, fmt.Errorf( + "decoding JWK Public Key JSON data: %s\n", err, + ) + } + + // Get the Key Type value. + kty, err := stringFromMap(jwk, "kty") + if err != nil { + return nil, fmt.Errorf("JWK Public Key type: %s", err) + } + + switch { + case kty == "EC": + // Call out to unmarshal EC public key. + return ecPublicKeyFromMap(jwk) + case kty == "RSA": + // Call out to unmarshal RSA public key. + return rsaPublicKeyFromMap(jwk) + default: + return nil, fmt.Errorf( + "JWK Public Key type not supported: %q\n", kty, + ) + } +} + +// UnmarshalPublicKeyJWKSet parses the JSON encoded data as a JSON Web Key Set +// and returns a slice of Public Key objects. 
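+// A hedged sketch of the expected input shape (a JSON Web Key Set with a
+// top-level "keys" array; the elided entries are assumed):
+//   pubKeys, err := UnmarshalPublicKeyJWKSet([]byte(`{"keys": [ ... ]}`))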
+func UnmarshalPublicKeyJWKSet(data []byte) ([]PublicKey, error) { + rawKeys, err := loadJSONKeySetRaw(data) + if err != nil { + return nil, err + } + + pubKeys := make([]PublicKey, 0, len(rawKeys)) + + for _, rawKey := range rawKeys { + pubKey, err := UnmarshalPublicKeyJWK(rawKey) + if err != nil { + return nil, err + } + pubKeys = append(pubKeys, pubKey) + } + + return pubKeys, nil +} + +// UnmarshalPrivateKeyJWK unmarshals the given JSON Web Key into a generic +// Private Key to be used with libtrust. +func UnmarshalPrivateKeyJWK(data []byte) (PrivateKey, error) { + jwk := make(map[string]interface{}) + + err := json.Unmarshal(data, &jwk) + if err != nil { + return nil, fmt.Errorf( + "decoding JWK Private Key JSON data: %s\n", err, + ) + } + + // Get the Key Type value. + kty, err := stringFromMap(jwk, "kty") + if err != nil { + return nil, fmt.Errorf("JWK Private Key type: %s", err) + } + + switch { + case kty == "EC": + // Call out to unmarshal EC private key. + return ecPrivateKeyFromMap(jwk) + case kty == "RSA": + // Call out to unmarshal RSA private key. + return rsaPrivateKeyFromMap(jwk) + default: + return nil, fmt.Errorf( + "JWK Private Key type not supported: %q\n", kty, + ) + } +} diff --git a/vendor/github.com/docker/libtrust/key_files.go b/vendor/github.com/docker/libtrust/key_files.go new file mode 100644 index 0000000..c526de5 --- /dev/null +++ b/vendor/github.com/docker/libtrust/key_files.go @@ -0,0 +1,255 @@ +package libtrust + +import ( + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "io/ioutil" + "os" + "strings" +) + +var ( + // ErrKeyFileDoesNotExist indicates that the private key file does not exist. + ErrKeyFileDoesNotExist = errors.New("key file does not exist") +) + +func readKeyFileBytes(filename string) ([]byte, error) { + data, err := ioutil.ReadFile(filename) + if err != nil { + if os.IsNotExist(err) { + err = ErrKeyFileDoesNotExist + } else { + err = fmt.Errorf("unable to read key file %s: %s", filename, err) + } + + return nil, err + } + + return data, nil +} + +/* + Loading and Saving of Public and Private Keys in either PEM or JWK format. +*/ + +// LoadKeyFile opens the given filename and attempts to read a Private Key +// encoded in either PEM or JWK format (if .json or .jwk file extension). +func LoadKeyFile(filename string) (PrivateKey, error) { + contents, err := readKeyFileBytes(filename) + if err != nil { + return nil, err + } + + var key PrivateKey + + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + key, err = UnmarshalPrivateKeyJWK(contents) + if err != nil { + return nil, fmt.Errorf("unable to decode private key JWK: %s", err) + } + } else { + key, err = UnmarshalPrivateKeyPEM(contents) + if err != nil { + return nil, fmt.Errorf("unable to decode private key PEM: %s", err) + } + } + + return key, nil +} + +// LoadPublicKeyFile opens the given filename and attempts to read a Public Key +// encoded in either PEM or JWK format (if .json or .jwk file extension). 
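+// A hedged sketch (file names assumed); the extension selects the decoder:
+//   jwkKey, err := LoadPublicKeyFile("id.jwk") // decoded as JWK
+//   pemKey, err := LoadPublicKeyFile("id.pem") // decoded as PEM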
+func LoadPublicKeyFile(filename string) (PublicKey, error) { + contents, err := readKeyFileBytes(filename) + if err != nil { + return nil, err + } + + var key PublicKey + + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + key, err = UnmarshalPublicKeyJWK(contents) + if err != nil { + return nil, fmt.Errorf("unable to decode public key JWK: %s", err) + } + } else { + key, err = UnmarshalPublicKeyPEM(contents) + if err != nil { + return nil, fmt.Errorf("unable to decode public key PEM: %s", err) + } + } + + return key, nil +} + +// SaveKey saves the given key to a file using the provided filename. +// This process will overwrite any existing file at the provided location. +func SaveKey(filename string, key PrivateKey) error { + var encodedKey []byte + var err error + + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + // Encode in JSON Web Key format. + encodedKey, err = json.MarshalIndent(key, "", " ") + if err != nil { + return fmt.Errorf("unable to encode private key JWK: %s", err) + } + } else { + // Encode in PEM format. + pemBlock, err := key.PEMBlock() + if err != nil { + return fmt.Errorf("unable to encode private key PEM: %s", err) + } + encodedKey = pem.EncodeToMemory(pemBlock) + } + + err = ioutil.WriteFile(filename, encodedKey, os.FileMode(0600)) + if err != nil { + return fmt.Errorf("unable to write private key file %s: %s", filename, err) + } + + return nil +} + +// SavePublicKey saves the given public key to the file. +func SavePublicKey(filename string, key PublicKey) error { + var encodedKey []byte + var err error + + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + // Encode in JSON Web Key format. + encodedKey, err = json.MarshalIndent(key, "", " ") + if err != nil { + return fmt.Errorf("unable to encode public key JWK: %s", err) + } + } else { + // Encode in PEM format. + pemBlock, err := key.PEMBlock() + if err != nil { + return fmt.Errorf("unable to encode public key PEM: %s", err) + } + encodedKey = pem.EncodeToMemory(pemBlock) + } + + err = ioutil.WriteFile(filename, encodedKey, os.FileMode(0644)) + if err != nil { + return fmt.Errorf("unable to write public key file %s: %s", filename, err) + } + + return nil +} + +// Public Key Set files + +type jwkSet struct { + Keys []json.RawMessage `json:"keys"` +} + +// LoadKeySetFile loads a key set +func LoadKeySetFile(filename string) ([]PublicKey, error) { + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + return loadJSONKeySetFile(filename) + } + + // Must be a PEM format file + return loadPEMKeySetFile(filename) +} + +func loadJSONKeySetRaw(data []byte) ([]json.RawMessage, error) { + if len(data) == 0 { + // This is okay, just return an empty slice. 
+ return []json.RawMessage{}, nil
+ }
+
+ keySet := jwkSet{}
+
+ err := json.Unmarshal(data, &keySet)
+ if err != nil {
+ return nil, fmt.Errorf("unable to decode JSON Web Key Set: %s", err)
+ }
+
+ return keySet.Keys, nil
+}
+
+func loadJSONKeySetFile(filename string) ([]PublicKey, error) {
+ contents, err := readKeyFileBytes(filename)
+ if err != nil && err != ErrKeyFileDoesNotExist {
+ return nil, err
+ }
+
+ return UnmarshalPublicKeyJWKSet(contents)
+}
+
+func loadPEMKeySetFile(filename string) ([]PublicKey, error) {
+ data, err := readKeyFileBytes(filename)
+ if err != nil && err != ErrKeyFileDoesNotExist {
+ return nil, err
+ }
+
+ return UnmarshalPublicKeyPEMBundle(data)
+}
+
+// AddKeySetFile adds a key to a key set
+func AddKeySetFile(filename string, key PublicKey) error {
+ if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
+ return addKeySetJSONFile(filename, key)
+ }
+
+ // Must be a PEM format file
+ return addKeySetPEMFile(filename, key)
+}
+
+func addKeySetJSONFile(filename string, key PublicKey) error {
+ encodedKey, err := json.Marshal(key)
+ if err != nil {
+ return fmt.Errorf("unable to encode trusted client key: %s", err)
+ }
+
+ contents, err := readKeyFileBytes(filename)
+ if err != nil && err != ErrKeyFileDoesNotExist {
+ return err
+ }
+
+ rawEntries, err := loadJSONKeySetRaw(contents)
+ if err != nil {
+ return err
+ }
+
+ rawEntries = append(rawEntries, json.RawMessage(encodedKey))
+ entriesWrapper := jwkSet{Keys: rawEntries}
+
+ encodedEntries, err := json.MarshalIndent(entriesWrapper, "", " ")
+ if err != nil {
+ return fmt.Errorf("unable to encode trusted client keys: %s", err)
+ }
+
+ err = ioutil.WriteFile(filename, encodedEntries, os.FileMode(0644))
+ if err != nil {
+ return fmt.Errorf("unable to write trusted client keys file %s: %s", filename, err)
+ }
+
+ return nil
+}
+
+func addKeySetPEMFile(filename string, key PublicKey) error {
+ // Encode to PEM, open file for appending, write PEM.
+ file, err := os.OpenFile(filename, os.O_CREATE|os.O_APPEND|os.O_RDWR, os.FileMode(0644))
+ if err != nil {
+ return fmt.Errorf("unable to open trusted client keys file %s: %s", filename, err)
+ }
+ defer file.Close()
+
+ pemBlock, err := key.PEMBlock()
+ if err != nil {
+ return fmt.Errorf("unable to encode trusted key: %s", err)
+ }
+
+ _, err = file.Write(pem.EncodeToMemory(pemBlock))
+ if err != nil {
+ return fmt.Errorf("unable to write trusted keys file: %s", err)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/docker/libtrust/key_manager.go b/vendor/github.com/docker/libtrust/key_manager.go
new file mode 100644
index 0000000..9a98ae3
--- /dev/null
+++ b/vendor/github.com/docker/libtrust/key_manager.go
@@ -0,0 +1,175 @@
+package libtrust
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "fmt"
+ "io/ioutil"
+ "net"
+ "os"
+ "path"
+ "sync"
+)
+
+// ClientKeyManager manages client keys on the filesystem
+type ClientKeyManager struct {
+ key PrivateKey
+ clientFile string
+ clientDir string
+
+ clientLock sync.RWMutex
+ clients []PublicKey
+
+ configLock sync.Mutex
+ configs []*tls.Config
+}
+
+// NewClientKeyManager loads a new manager from a set of key files,
+// managed by the given private key.
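+// A hedged construction sketch (paths assumed):
+//   mgr, err := NewClientKeyManager(trustKey, "/etc/trust/clients.json", "/etc/trust/clients.d")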
+func NewClientKeyManager(trustKey PrivateKey, clientFile, clientDir string) (*ClientKeyManager, error) { + m := &ClientKeyManager{ + key: trustKey, + clientFile: clientFile, + clientDir: clientDir, + } + if err := m.loadKeys(); err != nil { + return nil, err + } + // TODO Start watching file and directory + + return m, nil +} + +func (c *ClientKeyManager) loadKeys() (err error) { + // Load authorized keys file + var clients []PublicKey + if c.clientFile != "" { + clients, err = LoadKeySetFile(c.clientFile) + if err != nil { + return fmt.Errorf("unable to load authorized keys: %s", err) + } + } + + // Add clients from authorized keys directory + files, err := ioutil.ReadDir(c.clientDir) + if err != nil && !os.IsNotExist(err) { + return fmt.Errorf("unable to open authorized keys directory: %s", err) + } + for _, f := range files { + if !f.IsDir() { + publicKey, err := LoadPublicKeyFile(path.Join(c.clientDir, f.Name())) + if err != nil { + return fmt.Errorf("unable to load authorized key file: %s", err) + } + clients = append(clients, publicKey) + } + } + + c.clientLock.Lock() + c.clients = clients + c.clientLock.Unlock() + + return nil +} + +// RegisterTLSConfig registers a tls configuration to manager +// such that any changes to the keys may be reflected in +// the tls client CA pool +func (c *ClientKeyManager) RegisterTLSConfig(tlsConfig *tls.Config) error { + c.clientLock.RLock() + certPool, err := GenerateCACertPool(c.key, c.clients) + if err != nil { + return fmt.Errorf("CA pool generation error: %s", err) + } + c.clientLock.RUnlock() + + tlsConfig.ClientCAs = certPool + + c.configLock.Lock() + c.configs = append(c.configs, tlsConfig) + c.configLock.Unlock() + + return nil +} + +// NewIdentityAuthTLSConfig creates a tls.Config for the server to use for +// libtrust identity authentication for the domain specified +func NewIdentityAuthTLSConfig(trustKey PrivateKey, clients *ClientKeyManager, addr string, domain string) (*tls.Config, error) { + tlsConfig := newTLSConfig() + + tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert + if err := clients.RegisterTLSConfig(tlsConfig); err != nil { + return nil, err + } + + // Generate cert + ips, domains, err := parseAddr(addr) + if err != nil { + return nil, err + } + // add domain that it expects clients to use + domains = append(domains, domain) + x509Cert, err := GenerateSelfSignedServerCert(trustKey, domains, ips) + if err != nil { + return nil, fmt.Errorf("certificate generation error: %s", err) + } + tlsConfig.Certificates = []tls.Certificate{{ + Certificate: [][]byte{x509Cert.Raw}, + PrivateKey: trustKey.CryptoPrivateKey(), + Leaf: x509Cert, + }} + + return tlsConfig, nil +} + +// NewCertAuthTLSConfig creates a tls.Config for the server to use for +// certificate authentication +func NewCertAuthTLSConfig(caPath, certPath, keyPath string) (*tls.Config, error) { + tlsConfig := newTLSConfig() + + cert, err := tls.LoadX509KeyPair(certPath, keyPath) + if err != nil { + return nil, fmt.Errorf("Couldn't load X509 key pair (%s, %s): %s. Key encrypted?", certPath, keyPath, err) + } + tlsConfig.Certificates = []tls.Certificate{cert} + + // Verify client certificates against a CA? 
+ if caPath != "" {
+ certPool := x509.NewCertPool()
+ file, err := ioutil.ReadFile(caPath)
+ if err != nil {
+ return nil, fmt.Errorf("Couldn't read CA certificate: %s", err)
+ }
+ certPool.AppendCertsFromPEM(file)
+
+ tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert
+ tlsConfig.ClientCAs = certPool
+ }
+
+ return tlsConfig, nil
+}
+
+func newTLSConfig() *tls.Config {
+ return &tls.Config{
+ NextProtos: []string{"http/1.1"},
+ // Avoid fallback on insecure SSL protocols
+ MinVersion: tls.VersionTLS10,
+ }
+}
+
+// parseAddr parses an address into an array of IPs and domains
+func parseAddr(addr string) ([]net.IP, []string, error) {
+ host, _, err := net.SplitHostPort(addr)
+ if err != nil {
+ return nil, nil, err
+ }
+ var domains []string
+ var ips []net.IP
+ ip := net.ParseIP(host)
+ if ip != nil {
+ ips = []net.IP{ip}
+ } else {
+ domains = []string{host}
+ }
+ return ips, domains, nil
+}
diff --git a/vendor/github.com/docker/libtrust/rsa_key.go b/vendor/github.com/docker/libtrust/rsa_key.go
new file mode 100644
index 0000000..dac4cac
--- /dev/null
+++ b/vendor/github.com/docker/libtrust/rsa_key.go
@@ -0,0 +1,427 @@
+package libtrust
+
+import (
+ "crypto"
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/x509"
+ "encoding/json"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "io"
+ "math/big"
+)
+
+/*
+ * RSA DSA PUBLIC KEY
+ */
+
+// rsaPublicKey implements a JWK Public Key using RSA digital signature algorithms.
+type rsaPublicKey struct {
+ *rsa.PublicKey
+ extended map[string]interface{}
+}
+
+func fromRSAPublicKey(cryptoPublicKey *rsa.PublicKey) *rsaPublicKey {
+ return &rsaPublicKey{cryptoPublicKey, map[string]interface{}{}}
+}
+
+// KeyType returns the JWK key type for RSA keys, i.e., "RSA".
+func (k *rsaPublicKey) KeyType() string {
+ return "RSA"
+}
+
+// KeyID returns a distinct identifier which is unique to this Public Key.
+func (k *rsaPublicKey) KeyID() string {
+ return keyIDFromCryptoKey(k)
+}
+
+func (k *rsaPublicKey) String() string {
+ return fmt.Sprintf("RSA Public Key <%s>", k.KeyID())
+}
+
+// Verify verifies the signature of the data in the io.Reader using this Public Key.
+// The alg parameter should be the name of the JWA digital signature algorithm
+// which was used to produce the signature and should be supported by this
+// public key. Returns a nil error if the signature is valid.
+func (k *rsaPublicKey) Verify(data io.Reader, alg string, signature []byte) error {
+ // Verify the signature of the given data, return non-nil error if invalid.
+ sigAlg, err := rsaSignatureAlgorithmByName(alg)
+ if err != nil {
+ return fmt.Errorf("unable to verify Signature: %s", err)
+ }
+
+ hasher := sigAlg.HashID().New()
+ _, err = io.Copy(hasher, data)
+ if err != nil {
+ return fmt.Errorf("error reading data to sign: %s", err)
+ }
+ hash := hasher.Sum(nil)
+
+ err = rsa.VerifyPKCS1v15(k.PublicKey, sigAlg.HashID(), hash, signature)
+ if err != nil {
+ return fmt.Errorf("invalid %s signature: %s", sigAlg.HeaderParam(), err)
+ }
+
+ return nil
+}
+
+// CryptoPublicKey returns the internal object which can be used as a
+// crypto.PublicKey for use with other standard library operations.
The type +// is either *rsa.PublicKey or *ecdsa.PublicKey +func (k *rsaPublicKey) CryptoPublicKey() crypto.PublicKey { + return k.PublicKey +} + +func (k *rsaPublicKey) toMap() map[string]interface{} { + jwk := make(map[string]interface{}) + for k, v := range k.extended { + jwk[k] = v + } + jwk["kty"] = k.KeyType() + jwk["kid"] = k.KeyID() + jwk["n"] = joseBase64UrlEncode(k.N.Bytes()) + jwk["e"] = joseBase64UrlEncode(serializeRSAPublicExponentParam(k.E)) + + return jwk +} + +// MarshalJSON serializes this Public Key using the JWK JSON serialization format for +// RSA keys. +func (k *rsaPublicKey) MarshalJSON() (data []byte, err error) { + return json.Marshal(k.toMap()) +} + +// PEMBlock serializes this Public Key to DER-encoded PKIX format. +func (k *rsaPublicKey) PEMBlock() (*pem.Block, error) { + derBytes, err := x509.MarshalPKIXPublicKey(k.PublicKey) + if err != nil { + return nil, fmt.Errorf("unable to serialize RSA PublicKey to DER-encoded PKIX format: %s", err) + } + k.extended["kid"] = k.KeyID() // For display purposes. + return createPemBlock("PUBLIC KEY", derBytes, k.extended) +} + +func (k *rsaPublicKey) AddExtendedField(field string, value interface{}) { + k.extended[field] = value +} + +func (k *rsaPublicKey) GetExtendedField(field string) interface{} { + v, ok := k.extended[field] + if !ok { + return nil + } + return v +} + +func rsaPublicKeyFromMap(jwk map[string]interface{}) (*rsaPublicKey, error) { + // JWK key type (kty) has already been determined to be "RSA". + // Need to extract 'n', 'e', and 'kid' and check for + // consistency. + + // Get the modulus parameter N. + nB64Url, err := stringFromMap(jwk, "n") + if err != nil { + return nil, fmt.Errorf("JWK RSA Public Key modulus: %s", err) + } + + n, err := parseRSAModulusParam(nB64Url) + if err != nil { + return nil, fmt.Errorf("JWK RSA Public Key modulus: %s", err) + } + + // Get the public exponent E. + eB64Url, err := stringFromMap(jwk, "e") + if err != nil { + return nil, fmt.Errorf("JWK RSA Public Key exponent: %s", err) + } + + e, err := parseRSAPublicExponentParam(eB64Url) + if err != nil { + return nil, fmt.Errorf("JWK RSA Public Key exponent: %s", err) + } + + key := &rsaPublicKey{ + PublicKey: &rsa.PublicKey{N: n, E: e}, + } + + // Key ID is optional, but if it exists, it should match the key. + _, ok := jwk["kid"] + if ok { + kid, err := stringFromMap(jwk, "kid") + if err != nil { + return nil, fmt.Errorf("JWK RSA Public Key ID: %s", err) + } + if kid != key.KeyID() { + return nil, fmt.Errorf("JWK RSA Public Key ID does not match: %s", kid) + } + } + + if _, ok := jwk["d"]; ok { + return nil, fmt.Errorf("JWK RSA Public Key cannot contain private exponent") + } + + key.extended = jwk + + return key, nil +} + +/* + * RSA DSA PRIVATE KEY + */ + +// rsaPrivateKey implements a JWK Private Key using RSA digital signature algorithms. +type rsaPrivateKey struct { + rsaPublicKey + *rsa.PrivateKey +} + +func fromRSAPrivateKey(cryptoPrivateKey *rsa.PrivateKey) *rsaPrivateKey { + return &rsaPrivateKey{ + *fromRSAPublicKey(&cryptoPrivateKey.PublicKey), + cryptoPrivateKey, + } +} + +// PublicKey returns the Public Key data associated with this Private Key. +func (k *rsaPrivateKey) PublicKey() PublicKey { + return &k.rsaPublicKey +} + +func (k *rsaPrivateKey) String() string { + return fmt.Sprintf("RSA Private Key <%s>", k.KeyID()) +} + +// Sign signs the data read from the io.Reader using a signature algorithm supported +// by the RSA private key. 
If the specified hashing algorithm is supported by
+// this key, that hash function is used to generate the signature; otherwise
+// the default hashing algorithm for this key is used. Returns the signature
+// and the name of the JWK signature algorithm used, e.g., "RS256", "RS384",
+// "RS512".
+func (k *rsaPrivateKey) Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) {
+ // Generate a signature of the data using the internal alg.
+ sigAlg := rsaPKCS1v15SignatureAlgorithmForHashID(hashID)
+ hasher := sigAlg.HashID().New()
+
+ _, err = io.Copy(hasher, data)
+ if err != nil {
+ return nil, "", fmt.Errorf("error reading data to sign: %s", err)
+ }
+ hash := hasher.Sum(nil)
+
+ signature, err = rsa.SignPKCS1v15(rand.Reader, k.PrivateKey, sigAlg.HashID(), hash)
+ if err != nil {
+ return nil, "", fmt.Errorf("error producing signature: %s", err)
+ }
+
+ alg = sigAlg.HeaderParam()
+
+ return
+}
+
+// CryptoPrivateKey returns the internal object which can be used as a
+// crypto.PrivateKey for use with other standard library operations. The type
+// is either *rsa.PrivateKey or *ecdsa.PrivateKey
+func (k *rsaPrivateKey) CryptoPrivateKey() crypto.PrivateKey {
+ return k.PrivateKey
+}
+
+func (k *rsaPrivateKey) toMap() map[string]interface{} {
+ k.Precompute() // Make sure the precomputed values are stored.
+ jwk := k.rsaPublicKey.toMap()
+
+ jwk["d"] = joseBase64UrlEncode(k.D.Bytes())
+ jwk["p"] = joseBase64UrlEncode(k.Primes[0].Bytes())
+ jwk["q"] = joseBase64UrlEncode(k.Primes[1].Bytes())
+ jwk["dp"] = joseBase64UrlEncode(k.Precomputed.Dp.Bytes())
+ jwk["dq"] = joseBase64UrlEncode(k.Precomputed.Dq.Bytes())
+ jwk["qi"] = joseBase64UrlEncode(k.Precomputed.Qinv.Bytes())
+
+ otherPrimes := k.Primes[2:]
+
+ if len(otherPrimes) > 0 {
+ otherPrimesInfo := make([]interface{}, len(otherPrimes))
+ for i, r := range otherPrimes {
+ otherPrimeInfo := make(map[string]string, 3)
+ otherPrimeInfo["r"] = joseBase64UrlEncode(r.Bytes())
+ crtVal := k.Precomputed.CRTValues[i]
+ otherPrimeInfo["d"] = joseBase64UrlEncode(crtVal.Exp.Bytes())
+ otherPrimeInfo["t"] = joseBase64UrlEncode(crtVal.Coeff.Bytes())
+ otherPrimesInfo[i] = otherPrimeInfo
+ }
+ jwk["oth"] = otherPrimesInfo
+ }
+
+ return jwk
+}
+
+// MarshalJSON serializes this Private Key using the JWK JSON serialization format for
+// RSA keys.
+func (k *rsaPrivateKey) MarshalJSON() (data []byte, err error) {
+ return json.Marshal(k.toMap())
+}
+
+// PEMBlock serializes this Private Key to DER-encoded PKCS#1 format.
+func (k *rsaPrivateKey) PEMBlock() (*pem.Block, error) {
+ derBytes := x509.MarshalPKCS1PrivateKey(k.PrivateKey)
+ k.extended["keyID"] = k.KeyID() // For display purposes.
+ return createPemBlock("RSA PRIVATE KEY", derBytes, k.extended)
+}
+
+func rsaPrivateKeyFromMap(jwk map[string]interface{}) (*rsaPrivateKey, error) {
+ // The JWA spec for RSA Private Keys (draft rfc section 5.3.2) states that
+ // only the private key exponent 'd' is REQUIRED, the others are just for
+ // signature/decryption optimizations and SHOULD be included when the JWK
+ // is produced. We MAY choose to accept a JWK which only includes 'd', but
+ // we're going to go ahead and not choose to accept it without the extra
+ // fields. Only the 'oth' field will be optional (for multi-prime keys).
+ privateExponent, err := parseRSAPrivateKeyParamFromMap(jwk, "d")
+ if err != nil {
+ return nil, fmt.Errorf("JWK RSA Private Key exponent: %s", err)
+ }
+ firstPrimeFactor, err := parseRSAPrivateKeyParamFromMap(jwk, "p")
+ if err != nil {
+ return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err)
+ }
+ secondPrimeFactor, err := parseRSAPrivateKeyParamFromMap(jwk, "q")
+ if err != nil {
+ return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err)
+ }
+ firstFactorCRT, err := parseRSAPrivateKeyParamFromMap(jwk, "dp")
+ if err != nil {
+ return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err)
+ }
+ secondFactorCRT, err := parseRSAPrivateKeyParamFromMap(jwk, "dq")
+ if err != nil {
+ return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err)
+ }
+ crtCoeff, err := parseRSAPrivateKeyParamFromMap(jwk, "qi")
+ if err != nil {
+ return nil, fmt.Errorf("JWK RSA Private Key CRT coefficient: %s", err)
+ }
+
+ var oth interface{}
+ if _, ok := jwk["oth"]; ok {
+ oth = jwk["oth"]
+ delete(jwk, "oth")
+ }
+
+ // JWK key type (kty) has already been determined to be "RSA".
+ // Need to extract the public key information, then extract the private
+ // key values.
+ publicKey, err := rsaPublicKeyFromMap(jwk)
+ if err != nil {
+ return nil, err
+ }
+
+ privateKey := &rsa.PrivateKey{
+ PublicKey: *publicKey.PublicKey,
+ D: privateExponent,
+ Primes: []*big.Int{firstPrimeFactor, secondPrimeFactor},
+ Precomputed: rsa.PrecomputedValues{
+ Dp: firstFactorCRT,
+ Dq: secondFactorCRT,
+ Qinv: crtCoeff,
+ },
+ }
+
+ if oth != nil {
+ // Should be an array of more JSON objects.
+ otherPrimesInfo, ok := oth.([]interface{})
+ if !ok {
+ return nil, errors.New("JWK RSA Private Key: Invalid other primes info: must be an array")
+ }
+ numOtherPrimeFactors := len(otherPrimesInfo)
+ if numOtherPrimeFactors == 0 {
+ return nil, errors.New("JWK RSA Private Key: Invalid other primes info: must be absent or non-empty")
+ }
+ otherPrimeFactors := make([]*big.Int, numOtherPrimeFactors)
+ productOfPrimes := new(big.Int).Mul(firstPrimeFactor, secondPrimeFactor)
+ crtValues := make([]rsa.CRTValue, numOtherPrimeFactors)
+
+ for i, val := range otherPrimesInfo {
+ otherPrimeinfo, ok := val.(map[string]interface{})
+ if !ok {
+ return nil, errors.New("JWK RSA Private Key: Invalid other prime info: must be a JSON object")
+ }
+
+ otherPrimeFactor, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "r")
+ if err != nil {
+ return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err)
+ }
+ otherFactorCRT, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "d")
+ if err != nil {
+ return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err)
+ }
+ otherCrtCoeff, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "t")
+ if err != nil {
+ return nil, fmt.Errorf("JWK RSA Private Key CRT coefficient: %s", err)
+ }
+
+ crtValue := &crtValues[i] // take a pointer so the assignments update the slice entry
+ crtValue.Exp = otherFactorCRT
+ crtValue.Coeff = otherCrtCoeff
+ crtValue.R = productOfPrimes
+ otherPrimeFactors[i] = otherPrimeFactor
+ productOfPrimes = new(big.Int).Mul(productOfPrimes, otherPrimeFactor)
+ }
+
+ privateKey.Primes = append(privateKey.Primes, otherPrimeFactors...)
+ privateKey.Precomputed.CRTValues = crtValues
+ }
+
+ key := &rsaPrivateKey{
+ rsaPublicKey: *publicKey,
+ PrivateKey: privateKey,
+ }
+
+ return key, nil
+}
+
+/*
+ * Key Generation Functions.
+ */
+
+func generateRSAPrivateKey(bits int) (k *rsaPrivateKey, err error) {
+ k = new(rsaPrivateKey)
+ k.PrivateKey, err = rsa.GenerateKey(rand.Reader, bits)
+ if err != nil {
+ return nil, err
+ }
+
+ k.rsaPublicKey.PublicKey = &k.PrivateKey.PublicKey
+ k.extended = make(map[string]interface{})
+
+ return
+}
+
+// GenerateRSA2048PrivateKey generates a key pair using 2048-bit RSA.
+func GenerateRSA2048PrivateKey() (PrivateKey, error) {
+ k, err := generateRSAPrivateKey(2048)
+ if err != nil {
+ return nil, fmt.Errorf("error generating RSA 2048-bit key: %s", err)
+ }
+
+ return k, nil
+}
+
+// GenerateRSA3072PrivateKey generates a key pair using 3072-bit RSA.
+func GenerateRSA3072PrivateKey() (PrivateKey, error) {
+ k, err := generateRSAPrivateKey(3072)
+ if err != nil {
+ return nil, fmt.Errorf("error generating RSA 3072-bit key: %s", err)
+ }
+
+ return k, nil
+}
+
+// GenerateRSA4096PrivateKey generates a key pair using 4096-bit RSA.
+func GenerateRSA4096PrivateKey() (PrivateKey, error) {
+ k, err := generateRSAPrivateKey(4096)
+ if err != nil {
+ return nil, fmt.Errorf("error generating RSA 4096-bit key: %s", err)
+ }
+
+ return k, nil
+}
diff --git a/vendor/github.com/docker/libtrust/util.go b/vendor/github.com/docker/libtrust/util.go
new file mode 100644
index 0000000..a5a101d
--- /dev/null
+++ b/vendor/github.com/docker/libtrust/util.go
@@ -0,0 +1,363 @@
+package libtrust
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/elliptic"
+ "crypto/tls"
+ "crypto/x509"
+ "encoding/base32"
+ "encoding/base64"
+ "encoding/binary"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "math/big"
+ "net/url"
+ "os"
+ "path/filepath"
+ "strings"
+ "time"
+)
+
+// LoadOrCreateTrustKey will load a PrivateKey from the specified path,
+// generating and saving a new EC key there if the file does not exist.
+func LoadOrCreateTrustKey(trustKeyPath string) (PrivateKey, error) {
+ if err := os.MkdirAll(filepath.Dir(trustKeyPath), 0700); err != nil {
+ return nil, err
+ }
+
+ trustKey, err := LoadKeyFile(trustKeyPath)
+ if err == ErrKeyFileDoesNotExist {
+ trustKey, err = GenerateECP256PrivateKey()
+ if err != nil {
+ return nil, fmt.Errorf("error generating key: %s", err)
+ }
+
+ if err := SaveKey(trustKeyPath, trustKey); err != nil {
+ return nil, fmt.Errorf("error saving key file: %s", err)
+ }
+
+ dir, file := filepath.Split(trustKeyPath)
+ if err := SavePublicKey(filepath.Join(dir, "public-"+file), trustKey.PublicKey()); err != nil {
+ return nil, fmt.Errorf("error saving public key file: %s", err)
+ }
+ } else if err != nil {
+ return nil, fmt.Errorf("error loading key file: %s", err)
+ }
+ return trustKey, nil
+}
+
+// NewIdentityAuthTLSClientConfig returns a tls.Config configured to use identity
+// based authentication from the specified dockerUrl, the rootConfigPath and
+// the server name to which it is connecting.
+// If trustUnknownHosts is true it will automatically add the host to the
+// known-hosts.json in rootConfigPath.
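+// A hedged usage sketch (the URL, config path, and server name are assumed):
+//   cfg, err := NewIdentityAuthTLSClientConfig("tcp://host:2376", true, "/root/.docker/trust", "host")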
+func NewIdentityAuthTLSClientConfig(dockerUrl string, trustUnknownHosts bool, rootConfigPath string, serverName string) (*tls.Config, error) { + tlsConfig := newTLSConfig() + + trustKeyPath := filepath.Join(rootConfigPath, "key.json") + knownHostsPath := filepath.Join(rootConfigPath, "known-hosts.json") + + u, err := url.Parse(dockerUrl) + if err != nil { + return nil, fmt.Errorf("unable to parse machine url") + } + + if u.Scheme == "unix" { + return nil, nil + } + + addr := u.Host + proto := "tcp" + + trustKey, err := LoadOrCreateTrustKey(trustKeyPath) + if err != nil { + return nil, fmt.Errorf("unable to load trust key: %s", err) + } + + knownHosts, err := LoadKeySetFile(knownHostsPath) + if err != nil { + return nil, fmt.Errorf("could not load trusted hosts file: %s", err) + } + + allowedHosts, err := FilterByHosts(knownHosts, addr, false) + if err != nil { + return nil, fmt.Errorf("error filtering hosts: %s", err) + } + + certPool, err := GenerateCACertPool(trustKey, allowedHosts) + if err != nil { + return nil, fmt.Errorf("Could not create CA pool: %s", err) + } + + tlsConfig.ServerName = serverName + tlsConfig.RootCAs = certPool + + x509Cert, err := GenerateSelfSignedClientCert(trustKey) + if err != nil { + return nil, fmt.Errorf("certificate generation error: %s", err) + } + + tlsConfig.Certificates = []tls.Certificate{{ + Certificate: [][]byte{x509Cert.Raw}, + PrivateKey: trustKey.CryptoPrivateKey(), + Leaf: x509Cert, + }} + + tlsConfig.InsecureSkipVerify = true + + testConn, err := tls.Dial(proto, addr, tlsConfig) + if err != nil { + return nil, fmt.Errorf("tls Handshake error: %s", err) + } + + opts := x509.VerifyOptions{ + Roots: tlsConfig.RootCAs, + CurrentTime: time.Now(), + DNSName: tlsConfig.ServerName, + Intermediates: x509.NewCertPool(), + } + + certs := testConn.ConnectionState().PeerCertificates + for i, cert := range certs { + if i == 0 { + continue + } + opts.Intermediates.AddCert(cert) + } + + if _, err := certs[0].Verify(opts); err != nil { + if _, ok := err.(x509.UnknownAuthorityError); ok { + if trustUnknownHosts { + pubKey, err := FromCryptoPublicKey(certs[0].PublicKey) + if err != nil { + return nil, fmt.Errorf("error extracting public key from cert: %s", err) + } + + pubKey.AddExtendedField("hosts", []string{addr}) + + if err := AddKeySetFile(knownHostsPath, pubKey); err != nil { + return nil, fmt.Errorf("error adding machine to known hosts: %s", err) + } + } else { + return nil, fmt.Errorf("unable to connect. unknown host: %s", addr) + } + } + } + + testConn.Close() + tlsConfig.InsecureSkipVerify = false + + return tlsConfig, nil +} + +// joseBase64UrlEncode encodes the given data using the standard base64 url +// encoding format but with all trailing '=' characters omitted in accordance +// with the jose specification. +// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 +func joseBase64UrlEncode(b []byte) string { + return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=") +} + +// joseBase64UrlDecode decodes the given string using the standard base64 url +// decoder but first adds the appropriate number of trailing '=' characters in +// accordance with the jose specification. 
+// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 +func joseBase64UrlDecode(s string) ([]byte, error) { + s = strings.Replace(s, "\n", "", -1) + s = strings.Replace(s, " ", "", -1) + switch len(s) % 4 { + case 0: + case 2: + s += "==" + case 3: + s += "=" + default: + return nil, errors.New("illegal base64url string") + } + return base64.URLEncoding.DecodeString(s) +} + +func keyIDEncode(b []byte) string { + s := strings.TrimRight(base32.StdEncoding.EncodeToString(b), "=") + var buf bytes.Buffer + var i int + for i = 0; i < len(s)/4-1; i++ { + start := i * 4 + end := start + 4 + buf.WriteString(s[start:end] + ":") + } + buf.WriteString(s[i*4:]) + return buf.String() +} + +func keyIDFromCryptoKey(pubKey PublicKey) string { + // Generate and return a 'libtrust' fingerprint of the public key. + // For an RSA key this should be: + // SHA256(DER encoded ASN1) + // Then truncated to 240 bits and encoded into 12 base32 groups like so: + // ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP + derBytes, err := x509.MarshalPKIXPublicKey(pubKey.CryptoPublicKey()) + if err != nil { + return "" + } + hasher := crypto.SHA256.New() + hasher.Write(derBytes) + return keyIDEncode(hasher.Sum(nil)[:30]) +} + +func stringFromMap(m map[string]interface{}, key string) (string, error) { + val, ok := m[key] + if !ok { + return "", fmt.Errorf("%q value not specified", key) + } + + str, ok := val.(string) + if !ok { + return "", fmt.Errorf("%q value must be a string", key) + } + delete(m, key) + + return str, nil +} + +func parseECCoordinate(cB64Url string, curve elliptic.Curve) (*big.Int, error) { + curveByteLen := (curve.Params().BitSize + 7) >> 3 + + cBytes, err := joseBase64UrlDecode(cB64Url) + if err != nil { + return nil, fmt.Errorf("invalid base64 URL encoding: %s", err) + } + cByteLength := len(cBytes) + if cByteLength != curveByteLen { + return nil, fmt.Errorf("invalid number of octets: got %d, should be %d", cByteLength, curveByteLen) + } + return new(big.Int).SetBytes(cBytes), nil +} + +func parseECPrivateParam(dB64Url string, curve elliptic.Curve) (*big.Int, error) { + dBytes, err := joseBase64UrlDecode(dB64Url) + if err != nil { + return nil, fmt.Errorf("invalid base64 URL encoding: %s", err) + } + + // The length of this octet string MUST be ceiling(log-base-2(n)/8) + // octets (where n is the order of the curve). This is because the private + // key d must be in the interval [1, n-1] so the bitlength of d should be + // no larger than the bitlength of n-1. The easiest way to find the octet + // length is to take bitlength(n-1), add 7 to force a carry, and shift this + // bit sequence right by 3, which is essentially dividing by 8 and adding + // 1 if there is any remainder. Thus, the private key value d should be + // output to (bitlength(n-1)+7)>>3 octets. + n := curve.Params().N + octetLength := (new(big.Int).Sub(n, big.NewInt(1)).BitLen() + 7) >> 3 + dByteLength := len(dBytes) + + if dByteLength != octetLength { + return nil, fmt.Errorf("invalid number of octets: got %d, should be %d", dByteLength, octetLength) + } + + return new(big.Int).SetBytes(dBytes), nil +} + +func parseRSAModulusParam(nB64Url string) (*big.Int, error) { + nBytes, err := joseBase64UrlDecode(nB64Url) + if err != nil { + return nil, fmt.Errorf("invalid base64 URL encoding: %s", err) + } + + return new(big.Int).SetBytes(nBytes), nil +} + +func serializeRSAPublicExponentParam(e int) []byte { + // We MUST use the minimum number of octets to represent E. 
+ // E is supposed to be 65537 for performance and security reasons
+ // and is what golang's rsa package generates, but it might be
+ // different if imported from some other generator.
+ buf := make([]byte, 4)
+ binary.BigEndian.PutUint32(buf, uint32(e))
+ var i int
+ for i = 0; i < len(buf); i++ {
+ if buf[i] != 0 {
+ break
+ }
+ }
+ return buf[i:]
+}
+
+func parseRSAPublicExponentParam(eB64Url string) (int, error) {
+ eBytes, err := joseBase64UrlDecode(eB64Url)
+ if err != nil {
+ return 0, fmt.Errorf("invalid base64 URL encoding: %s", err)
+ }
+ // Only the minimum number of bytes were used to represent E, but
+ // binary.BigEndian.Uint32 expects at least 4 bytes, so we need
+ // to add zero padding if necessary.
+ byteLen := len(eBytes)
+ buf := make([]byte, 4-byteLen, 4)
+ eBytes = append(buf, eBytes...)
+
+ return int(binary.BigEndian.Uint32(eBytes)), nil
+}
+
+func parseRSAPrivateKeyParamFromMap(m map[string]interface{}, key string) (*big.Int, error) {
+ b64Url, err := stringFromMap(m, key)
+ if err != nil {
+ return nil, err
+ }
+
+ paramBytes, err := joseBase64UrlDecode(b64Url)
+ if err != nil {
+ return nil, fmt.Errorf("invalid base64 URL encoding: %s", err)
+ }
+
+ return new(big.Int).SetBytes(paramBytes), nil
+}
+
+func createPemBlock(name string, derBytes []byte, headers map[string]interface{}) (*pem.Block, error) {
+ pemBlock := &pem.Block{Type: name, Bytes: derBytes, Headers: map[string]string{}}
+ for k, v := range headers {
+ switch val := v.(type) {
+ case string:
+ pemBlock.Headers[k] = val
+ case []string:
+ if k == "hosts" {
+ pemBlock.Headers[k] = strings.Join(val, ",")
+ } else {
+ // Return error, non-encodable type
+ }
+ default:
+ // Return error, non-encodable type
+ }
+ }
+
+ return pemBlock, nil
+}
+
+func pubKeyFromPEMBlock(pemBlock *pem.Block) (PublicKey, error) {
+ cryptoPublicKey, err := x509.ParsePKIXPublicKey(pemBlock.Bytes)
+ if err != nil {
+ return nil, fmt.Errorf("unable to decode Public Key PEM data: %s", err)
+ }
+
+ pubKey, err := FromCryptoPublicKey(cryptoPublicKey)
+ if err != nil {
+ return nil, err
+ }
+
+ addPEMHeadersToKey(pemBlock, pubKey)
+
+ return pubKey, nil
+}
+
+func addPEMHeadersToKey(pemBlock *pem.Block, pubKey PublicKey) {
+ for key, value := range pemBlock.Headers {
+ var safeVal interface{}
+ if key == "hosts" {
+ safeVal = strings.Split(value, ",")
+ } else {
+ safeVal = value
+ }
+ pubKey.AddExtendedField(key, safeVal)
+ }
+}
diff --git a/vendor/github.com/gogo/protobuf/LICENSE b/vendor/github.com/gogo/protobuf/LICENSE
new file mode 100644
index 0000000..7be0cc7
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/LICENSE
@@ -0,0 +1,36 @@
+Protocol Buffers for Go with Gadgets
+
+Copyright (c) 2013, The GoGo Authors. All rights reserved.
+http://github.com/gogo/protobuf
+
+Go support for Protocol Buffers - Google's data interchange format
+
+Copyright 2010 The Go Authors. All rights reserved.
+https://github.com/golang/protobuf
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc.
nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/vendor/github.com/gogo/protobuf/README b/vendor/github.com/gogo/protobuf/README
new file mode 100644
index 0000000..c820827
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/README
@@ -0,0 +1,257 @@
+GoGoProtobuf http://github.com/gogo/protobuf extends
+GoProtobuf http://github.com/golang/protobuf
+
+# Go support for Protocol Buffers
+
+Google's data interchange format.
+Copyright 2010 The Go Authors.
+https://github.com/golang/protobuf
+
+This package and the code it generates require at least Go 1.4.
+
+This software implements Go bindings for protocol buffers. For
+information about protocol buffers themselves, see
+ https://developers.google.com/protocol-buffers/
+
+## Installation ##
+
+To use this software, you must:
+- Install the standard C++ implementation of protocol buffers from
+ https://developers.google.com/protocol-buffers/
+- Of course, install the Go compiler and tools from
+ https://golang.org/
+ See
+ https://golang.org/doc/install
+ for details or, if you are using gccgo, follow the instructions at
+ https://golang.org/doc/install/gccgo
+- Grab the code from the repository and install the proto package.
+ The simplest way is to run `go get -u github.com/golang/protobuf/protoc-gen-go`.
+ The compiler plugin, protoc-gen-go, will be installed in $GOBIN,
+ defaulting to $GOPATH/bin. It must be in your $PATH for the protocol
+ compiler, protoc, to find it.
+
+This software has two parts: a 'protocol compiler plugin' that
+generates Go source files that, once compiled, can access and manage
+protocol buffers; and a library that implements run-time support for
+encoding (marshaling), decoding (unmarshaling), and accessing protocol
+buffers.
+
+There is support for gRPC in Go using protocol buffers.
+See the note at the bottom of this file for details.
+
+There are no insertion points in the plugin.
+
+GoGoProtobuf provides extensions for protocol buffers and GoProtobuf;
+see http://github.com/gogo/protobuf/gogoproto/doc.go
+
+## Using protocol buffers with Go ##
+
+Once the software is installed, there are two steps to using it.
+First you must compile the protocol buffer definitions and then import
+them, with the support library, into your program.
+
+To compile the protocol buffer definition, run protoc with the --gogo_out
+parameter set to the directory you want to output the Go code to.
+
+ protoc --gogo_out=. *.proto
+
+The generated files will be suffixed .pb.go. See the Test code below
+for an example using such a file.
+
+The package comment for the proto library contains text describing
+the interface provided in Go for protocol buffers.
Here is an edited
+version.
+
+If you are using any gogo.proto extensions you will need to specify the
+proto_path to include the descriptor.proto and gogo.proto.
+gogo.proto is located in github.com/gogo/protobuf/gogoproto
+This should be fine, since your import is the same.
+descriptor.proto is located in either github.com/gogo/protobuf/protobuf
+or code.google.com/p/protobuf/trunk/src/
+Its import is google/protobuf/descriptor.proto so it might need some help.
+
+ protoc --gogo_out=. -I=.:github.com/gogo/protobuf/protobuf *.proto
+
+==========
+
+The proto package converts data structures to and from the
+wire format of protocol buffers. It works in concert with the
+Go source code generated for .proto files by the protocol compiler.
+
+A summary of the properties of the protocol buffer interface
+for a protocol buffer variable v:
+
+ - Names are turned from camel_case to CamelCase for export.
+ - There are no methods on v to set fields; just treat
+ them as structure fields.
+ - There are getters that return a field's value if set,
+ and return the field's default value if unset.
+ The getters work even if the receiver is a nil message.
+ - The zero value for a struct is its correct initialization state.
+ All desired fields must be set before marshaling.
+ - A Reset() method will restore a protobuf struct to its zero state.
+ - Non-repeated fields are pointers to the values; nil means unset.
+ That is, optional or required field int32 f becomes F *int32.
+ - Repeated fields are slices.
+ - Helper functions are available to aid the setting of fields.
+ Helpers for getting values are superseded by the
+ GetFoo methods and their use is deprecated.
+ msg.Foo = proto.String("hello") // set field
+ - Constants are defined to hold the default values of all fields that
+ have them. They have the form Default_StructName_FieldName.
+ Because the getter methods handle defaulted values,
+ direct use of these constants should be rare.
+ - Enums are given type names and maps from names to values.
+ Enum values are prefixed with the enum's type name. Enum types have
+ a String method, and an Enum method to assist in message construction.
+ - Nested groups and enums have type names prefixed with the name of
+ the surrounding message type.
+ - Extensions are given descriptor names that start with E_,
+ followed by an underscore-delimited list of the nested messages
+ that contain it (if any) followed by the CamelCased name of the
+ extension field itself. HasExtension, ClearExtension, GetExtension
+ and SetExtension are functions for manipulating extensions.
+ - Oneof field sets are given a single field in their message,
+ with distinguished wrapper types for each possible field value.
+ - Marshal and Unmarshal are functions to encode and decode the wire format.
+
+When the .proto file specifies `syntax="proto3"`, there are some differences:
+
+ - Non-repeated fields of non-message type are values instead of pointers
+ (see the sketch below).
+ - Enum types do not get an Enum method.
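+
+As a hedged illustration of the first proto3 difference (the message and field
+names here are assumed, not taken from this repository):
+
+```go
+ m := &example.Test3{} // a hypothetical proto3 message
+ m.Type = 17 // plain int32 value field; no proto.Int32 pointer wrapper
+```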
+ +Consider file test.proto, containing + +```proto + package example; + + enum FOO { X = 17; }; + + message Test { + required string label = 1; + optional int32 type = 2 [default=77]; + repeated int64 reps = 3; + optional group OptionalGroup = 4 { + required string RequiredField = 5; + } + } +``` + +To create and play with a Test object from the example package, + +```go + package main + + import ( + "log" + + "github.com/gogo/protobuf/proto" + "path/to/example" + ) + + func main() { + test := &example.Test { + Label: proto.String("hello"), + Type: proto.Int32(17), + Reps: []int64{1, 2, 3}, + Optionalgroup: &example.Test_OptionalGroup { + RequiredField: proto.String("good bye"), + }, + } + data, err := proto.Marshal(test) + if err != nil { + log.Fatal("marshaling error: ", err) + } + newTest := &example.Test{} + err = proto.Unmarshal(data, newTest) + if err != nil { + log.Fatal("unmarshaling error: ", err) + } + // Now test and newTest contain the same data. + if test.GetLabel() != newTest.GetLabel() { + log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) + } + // etc. + } +``` + + +## Parameters ## + +To pass extra parameters to the plugin, use a comma-separated +parameter list separated from the output directory by a colon: + + + protoc --gogo_out=plugins=grpc,import_path=mypackage:. *.proto + + +- `import_prefix=xxx` - a prefix that is added onto the beginning of + all imports. Useful for things like generating protos in a + subdirectory, or regenerating vendored protobufs in-place. +- `import_path=foo/bar` - used as the package if no input files + declare `go_package`. If it contains slashes, everything up to the + rightmost slash is ignored. +- `plugins=plugin1+plugin2` - specifies the list of sub-plugins to + load. The only plugin in this repo is `grpc`. +- `Mfoo/bar.proto=quux/shme` - declares that foo/bar.proto is + associated with Go package quux/shme. This is subject to the + import_prefix parameter. + +## gRPC Support ## + +If a proto file specifies RPC services, protoc-gen-go can be instructed to +generate code compatible with gRPC (http://www.grpc.io/). To do this, pass +the `plugins` parameter to protoc-gen-go; the usual way is to insert it into +the --go_out argument to protoc: + + protoc --gogo_out=plugins=grpc:. *.proto + +## Compatibility ## + +The library and the generated code are expected to be stable over time. +However, we reserve the right to make breaking changes without notice for the +following reasons: + +- Security. A security issue in the specification or implementation may come to + light whose resolution requires breaking compatibility. We reserve the right + to address such security issues. +- Unspecified behavior. There are some aspects of the Protocol Buffers + specification that are undefined. Programs that depend on such unspecified + behavior may break in future releases. +- Specification errors or changes. If it becomes necessary to address an + inconsistency, incompleteness, or change in the Protocol Buffers + specification, resolving the issue could affect the meaning or legality of + existing programs. We reserve the right to address such issues, including + updating the implementations. +- Bugs. If the library has a bug that violates the specification, a program + that depends on the buggy behavior may break if the bug is fixed. We reserve + the right to fix such bugs. +- Adding methods or fields to generated structs. These may conflict with field + names that already exist in a schema, causing applications to break. 
When the + code generator encounters a field in the schema that would collide with a + generated field or method name, the code generator will append an underscore + to the generated field or method name. +- Adding, removing, or changing methods or fields in generated structs that + start with `XXX`. These parts of the generated code are exported out of + necessity, but should not be considered part of the public API. +- Adding, removing, or changing unexported symbols in generated code. + +Any breaking changes outside of these will be announced 6 months in advance to +protobuf@googlegroups.com. + +You should, whenever possible, use generated code created by the `protoc-gen-go` +tool built at the same commit as the `proto` package. The `proto` package +declares package-level constants in the form `ProtoPackageIsVersionX`. +Application code and generated code may depend on one of these constants to +ensure that compilation will fail if the available version of the proto library +is too old. Whenever we make a change to the generated code that requires newer +library support, in the same commit we will increment the version number of the +generated code and declare a new package-level constant whose name incorporates +the latest version number. Removing a compatibility constant is considered a +breaking change and would be subject to the announcement policy stated above. + +## Plugins ## + +The `protoc-gen-go/generator` package exposes a plugin interface, +which is used by the gRPC code generation. This interface is not +supported and is subject to incompatible changes without notice. diff --git a/vendor/github.com/gogo/protobuf/Readme.md b/vendor/github.com/gogo/protobuf/Readme.md new file mode 100644 index 0000000..ae56495 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/Readme.md @@ -0,0 +1,122 @@ +# Protocol Buffers for Go with Gadgets + +[![Build Status](https://travis-ci.org/gogo/protobuf.svg?branch=master)](https://travis-ci.org/gogo/protobuf) + +gogoprotobuf is a fork of golang/protobuf with extra code generation features. + +This code generation is used to achieve: + + - fast marshalling and unmarshalling + - more canonical Go structures + - goprotobuf compatibility + - less typing by optionally generating extra helper code + - peace of mind by optionally generating test and benchmark code + - other serialization formats + +Keeping track of how up to date gogoprotobuf is relative to golang/protobuf is done in this +issue + +## Users + +These projects use gogoprotobuf: + + - etcd - blog - sample proto file + - spacemonkey - blog + - badoo - sample proto file + - mesos-go - sample proto file + - heka - the switch from golang/protobuf to gogo/protobuf when it was still on code.google.com + - cockroachdb - sample proto file + - go-ipfs - sample proto file + - rkive-go - sample proto file + - dropbox + - srclib - sample proto file + - adyoulike + - cloudfoundry - sample proto file + - kubernetes - go2idl built on top of gogoprotobuf + - dgraph - release notes - benchmarks + - centrifugo - release notes - blog + - docker swarmkit - sample proto file + - nats.io - go-nats-streaming + - tidb - Communication between tidb and tikv + - protoactor-go - vanity command that also generates actors from service definitions + - containerd - vanity command with custom field names that conforms to the golang convention. + - nakama + - proteus + - carbonzipper stack + - SendGrid + +Please let us know if you are using gogoprotobuf by posting on our GoogleGroup. 
+
+### Mentioned
+
+ - Cloudflare - go serialization talk - Albert Strasheim
+ - GopherCon 2014 Writing High Performance Databases in Go by Ben Johnson
+ - alecthomas' go serialization benchmarks
+
+## Getting Started
+
+There are several ways to use gogoprotobuf, but for all you need to install go and protoc.
+After that you can choose:
+
+ - Speed
+ - More Speed and more generated code
+ - Most Speed and most customization
+
+### Installation
+
+To install it, you must first have Go (at least version 1.6.3) installed (see [http://golang.org/doc/install](http://golang.org/doc/install)). Go 1.7.1 and 1.8 are continuously tested.
+
+Next, install the standard protocol buffer implementation from [https://github.com/google/protobuf](https://github.com/google/protobuf).
+Most versions from 2.3.1 should not give any problems, but 2.6.1, 3.0.2 and 3.3.0 are continuously tested.
+
+### Speed
+
+Install the protoc-gen-gofast binary
+
+ go get github.com/gogo/protobuf/protoc-gen-gofast
+
+Use it to generate faster marshaling and unmarshaling go code for your protocol buffers.
+
+ protoc --gofast_out=. myproto.proto
+
+This does not allow you to use any of the other gogoprotobuf [extensions](https://github.com/gogo/protobuf/blob/master/extensions.md).
+
+### More Speed and more generated code
+
+Fields without pointers cause less time in the garbage collector.
+More code generation results in more convenient methods.
+
+Other binaries are also included:
+
+ protoc-gen-gogofast (same as gofast, but imports gogoprotobuf)
+ protoc-gen-gogofaster (same as gogofast, without XXX_unrecognized, less pointer fields)
+ protoc-gen-gogoslick (same as gogofaster, but with generated string, gostring and equal methods)
+
+Installing any of these binaries is easy. Simply run:
+
+ go get github.com/gogo/protobuf/proto
+ go get github.com/gogo/protobuf/{binary}
+ go get github.com/gogo/protobuf/gogoproto
+
+These binaries allow you to use gogoprotobuf [extensions](https://github.com/gogo/protobuf/blob/master/extensions.md).
+
+### Most Speed and most customization
+
+Customizing the fields of the messages to be the fields that you actually want to use removes the need to copy between the structs you use and structs you use to serialize.
+gogoprotobuf also offers more serialization formats and generation of tests and even more methods.
+
+Please visit the [extensions](https://github.com/gogo/protobuf/blob/master/extensions.md) page for more documentation.
+
+Install protoc-gen-gogo:
+
+ go get github.com/gogo/protobuf/proto
+ go get github.com/gogo/protobuf/jsonpb
+ go get github.com/gogo/protobuf/protoc-gen-gogo
+ go get github.com/gogo/protobuf/gogoproto
+
+## GRPC
+
+It works the same as golang/protobuf; simply specify the plugin.
+Here is an example using gofast:
+
+ protoc --gofast_out=plugins=grpc:. my.proto
diff --git a/vendor/github.com/gogo/protobuf/proto/clone.go b/vendor/github.com/gogo/protobuf/proto/clone.go
new file mode 100644
index 0000000..5d4cba4
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/clone.go
@@ -0,0 +1,234 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Protocol buffer deep copy and merge. +// TODO: RawMessage. + +package proto + +import ( + "log" + "reflect" + "strings" +) + +// Clone returns a deep copy of a protocol buffer. +func Clone(pb Message) Message { + in := reflect.ValueOf(pb) + if in.IsNil() { + return pb + } + + out := reflect.New(in.Type().Elem()) + // out is empty so a merge is a deep copy. + mergeStruct(out.Elem(), in.Elem()) + return out.Interface().(Message) +} + +// Merge merges src into dst. +// Required and optional fields that are set in src will be set to that value in dst. +// Elements of repeated fields will be appended. +// Merge panics if src and dst are not the same type, or if dst is nil. +func Merge(dst, src Message) { + in := reflect.ValueOf(src) + out := reflect.ValueOf(dst) + if out.IsNil() { + panic("proto: nil destination") + } + if in.Type() != out.Type() { + // Explicit test prior to mergeStruct so that mistyped nils will fail + panic("proto: type mismatch") + } + if in.IsNil() { + // Merging nil into non-nil is a quiet no-op + return + } + mergeStruct(out.Elem(), in.Elem()) +} + +func mergeStruct(out, in reflect.Value) { + sprop := GetProperties(in.Type()) + for i := 0; i < in.NumField(); i++ { + f := in.Type().Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) + } + + if emIn, ok := in.Addr().Interface().(extensionsBytes); ok { + emOut := out.Addr().Interface().(extensionsBytes) + bIn := emIn.GetExtensions() + bOut := emOut.GetExtensions() + *bOut = append(*bOut, *bIn...) + } else if emIn, ok := extendable(in.Addr().Interface()); ok { + emOut, _ := extendable(out.Addr().Interface()) + mIn, muIn := emIn.extensionsRead() + if mIn != nil { + mOut := emOut.extensionsWrite() + muIn.Lock() + mergeExtension(mOut, mIn) + muIn.Unlock() + } + } + + uf := in.FieldByName("XXX_unrecognized") + if !uf.IsValid() { + return + } + uin := uf.Bytes() + if len(uin) > 0 { + out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...)) + } +} + +// mergeAny performs a merge between two values of the same type. +// viaPtr indicates whether the values were indirected through a pointer (implying proto2). +// prop is set if this is a struct field (it may be nil). 
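+// It recurses into messages, maps, pointers, and slices; proto3 scalar zero
+// values are only merged when reached via a pointer (viaPtr).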
+func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) { + if in.Type() == protoMessageType { + if !in.IsNil() { + if out.IsNil() { + out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) + } else { + Merge(out.Interface().(Message), in.Interface().(Message)) + } + } + return + } + switch in.Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, + reflect.String, reflect.Uint32, reflect.Uint64: + if !viaPtr && isProto3Zero(in) { + return + } + out.Set(in) + case reflect.Interface: + // Probably a oneof field; copy non-nil values. + if in.IsNil() { + return + } + // Allocate destination if it is not set, or set to a different type. + // Otherwise we will merge as normal. + if out.IsNil() || out.Elem().Type() != in.Elem().Type() { + out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T) + } + mergeAny(out.Elem(), in.Elem(), false, nil) + case reflect.Map: + if in.Len() == 0 { + return + } + if out.IsNil() { + out.Set(reflect.MakeMap(in.Type())) + } + // For maps with value types of *T or []byte we need to deep copy each value. + elemKind := in.Type().Elem().Kind() + for _, key := range in.MapKeys() { + var val reflect.Value + switch elemKind { + case reflect.Ptr: + val = reflect.New(in.Type().Elem().Elem()) + mergeAny(val, in.MapIndex(key), false, nil) + case reflect.Slice: + val = in.MapIndex(key) + val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) + default: + val = in.MapIndex(key) + } + out.SetMapIndex(key, val) + } + case reflect.Ptr: + if in.IsNil() { + return + } + if out.IsNil() { + out.Set(reflect.New(in.Elem().Type())) + } + mergeAny(out.Elem(), in.Elem(), true, nil) + case reflect.Slice: + if in.IsNil() { + return + } + if in.Type().Elem().Kind() == reflect.Uint8 { + // []byte is a scalar bytes field, not a repeated field. + + // Edge case: if this is in a proto3 message, a zero length + // bytes field is considered the zero value, and should not + // be merged. + if prop != nil && prop.proto3 && in.Len() == 0 { + return + } + + // Make a deep copy. + // Append to []byte{} instead of []byte(nil) so that we never end up + // with a nil result. 
+ out.SetBytes(append([]byte{}, in.Bytes()...)) + return + } + n := in.Len() + if out.IsNil() { + out.Set(reflect.MakeSlice(in.Type(), 0, n)) + } + switch in.Type().Elem().Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, + reflect.String, reflect.Uint32, reflect.Uint64: + out.Set(reflect.AppendSlice(out, in)) + default: + for i := 0; i < n; i++ { + x := reflect.Indirect(reflect.New(in.Type().Elem())) + mergeAny(x, in.Index(i), false, nil) + out.Set(reflect.Append(out, x)) + } + } + case reflect.Struct: + mergeStruct(out, in) + default: + // unknown type, so not a protocol buffer + log.Printf("proto: don't know how to copy %v", in) + } +} + +func mergeExtension(out, in map[int32]Extension) { + for extNum, eIn := range in { + eOut := Extension{desc: eIn.desc} + if eIn.value != nil { + v := reflect.New(reflect.TypeOf(eIn.value)).Elem() + mergeAny(v, reflect.ValueOf(eIn.value), false, nil) + eOut.value = v.Interface() + } + if eIn.enc != nil { + eOut.enc = make([]byte, len(eIn.enc)) + copy(eOut.enc, eIn.enc) + } + + out[extNum] = eOut + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/decode.go b/vendor/github.com/gogo/protobuf/proto/decode.go new file mode 100644 index 0000000..737f273 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/decode.go @@ -0,0 +1,978 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for decoding protocol buffer data to construct in-memory representations. + */ + +import ( + "errors" + "fmt" + "io" + "os" + "reflect" +) + +// errOverflow is returned when an integer is too large to be represented. +var errOverflow = errors.New("proto: integer overflow") + +// ErrInternalBadWireType is returned by generated code when an incorrect +// wire type is encountered. It does not get returned to user code. 
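+// Generated oneof unmarshalers return it so that unmarshalType can replace it
+// with a more descriptive error.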
+var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") + +// The fundamental decoders that interpret bytes on the wire. +// Those that take integer types all return uint64 and are +// therefore of type valueDecoder. + +// DecodeVarint reads a varint-encoded integer from the slice. +// It returns the integer and the number of bytes consumed, or +// zero if there is not enough. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func DecodeVarint(buf []byte) (x uint64, n int) { + for shift := uint(0); shift < 64; shift += 7 { + if n >= len(buf) { + return 0, 0 + } + b := uint64(buf[n]) + n++ + x |= (b & 0x7F) << shift + if (b & 0x80) == 0 { + return x, n + } + } + + // The number is too large to represent in a 64-bit value. + return 0, 0 +} + +func (p *Buffer) decodeVarintSlow() (x uint64, err error) { + i := p.index + l := len(p.buf) + + for shift := uint(0); shift < 64; shift += 7 { + if i >= l { + err = io.ErrUnexpectedEOF + return + } + b := p.buf[i] + i++ + x |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + p.index = i + return + } + } + + // The number is too large to represent in a 64-bit value. + err = errOverflow + return +} + +// DecodeVarint reads a varint-encoded integer from the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func (p *Buffer) DecodeVarint() (x uint64, err error) { + i := p.index + buf := p.buf + + if i >= len(buf) { + return 0, io.ErrUnexpectedEOF + } else if buf[i] < 0x80 { + p.index++ + return uint64(buf[i]), nil + } else if len(buf)-i < 10 { + return p.decodeVarintSlow() + } + + var b uint64 + // we already checked the first byte + x = uint64(buf[i]) - 0x80 + i++ + + b = uint64(buf[i]) + i++ + x += b << 7 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 7 + + b = uint64(buf[i]) + i++ + x += b << 14 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 14 + + b = uint64(buf[i]) + i++ + x += b << 21 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 21 + + b = uint64(buf[i]) + i++ + x += b << 28 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 28 + + b = uint64(buf[i]) + i++ + x += b << 35 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 35 + + b = uint64(buf[i]) + i++ + x += b << 42 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 42 + + b = uint64(buf[i]) + i++ + x += b << 49 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 49 + + b = uint64(buf[i]) + i++ + x += b << 56 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 56 + + b = uint64(buf[i]) + i++ + x += b << 63 + if b&0x80 == 0 { + goto done + } + // x -= 0x80 << 63 // Always zero. + + return 0, errOverflow + +done: + p.index = i + return x, nil +} + +// DecodeFixed64 reads a 64-bit integer from the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (p *Buffer) DecodeFixed64() (x uint64, err error) { + // x, err already 0 + i := p.index + 8 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-8]) + x |= uint64(p.buf[i-7]) << 8 + x |= uint64(p.buf[i-6]) << 16 + x |= uint64(p.buf[i-5]) << 24 + x |= uint64(p.buf[i-4]) << 32 + x |= uint64(p.buf[i-3]) << 40 + x |= uint64(p.buf[i-2]) << 48 + x |= uint64(p.buf[i-1]) << 56 + return +} + +// DecodeFixed32 reads a 32-bit integer from the Buffer. +// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. 
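+// The four bytes are read little-endian into the low 32 bits of the returned uint64.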
+func (p *Buffer) DecodeFixed32() (x uint64, err error) { + // x, err already 0 + i := p.index + 4 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-4]) + x |= uint64(p.buf[i-3]) << 8 + x |= uint64(p.buf[i-2]) << 16 + x |= uint64(p.buf[i-1]) << 24 + return +} + +// DecodeZigzag64 reads a zigzag-encoded 64-bit integer +// from the Buffer. +// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) DecodeZigzag64() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) + return +} + +// DecodeZigzag32 reads a zigzag-encoded 32-bit integer +// from the Buffer. +// This is the format used for the sint32 protocol buffer type. +func (p *Buffer) DecodeZigzag32() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) + return +} + +// These are not ValueDecoders: they produce an array of bytes or a string. +// bytes, embedded messages + +// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. +func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { + n, err := p.DecodeVarint() + if err != nil { + return nil, err + } + + nb := int(n) + if nb < 0 { + return nil, fmt.Errorf("proto: bad byte length %d", nb) + } + end := p.index + nb + if end < p.index || end > len(p.buf) { + return nil, io.ErrUnexpectedEOF + } + + if !alloc { + // todo: check if can get more uses of alloc=false + buf = p.buf[p.index:end] + p.index += nb + return + } + + buf = make([]byte, nb) + copy(buf, p.buf[p.index:]) + p.index += nb + return +} + +// DecodeStringBytes reads an encoded string from the Buffer. +// This is the format used for the proto2 string type. +func (p *Buffer) DecodeStringBytes() (s string, err error) { + buf, err := p.DecodeRawBytes(false) + if err != nil { + return + } + return string(buf), nil +} + +// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. +// If the protocol buffer has extensions, and the field matches, add it as an extension. +// Otherwise, if the XXX_unrecognized field exists, append the skipped data there. +func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error { + oi := o.index + + err := o.skip(t, tag, wire) + if err != nil { + return err + } + + if !unrecField.IsValid() { + return nil + } + + ptr := structPointer_Bytes(base, unrecField) + + // Add the skipped field to struct field + obuf := o.buf + + o.buf = *ptr + o.EncodeVarint(uint64(tag<<3 | wire)) + *ptr = append(o.buf, obuf[oi:o.index]...) + + o.buf = obuf + + return nil +} + +// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. 
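+// Groups are skipped recursively until the matching end-group tag is found.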
+func (o *Buffer) skip(t reflect.Type, tag, wire int) error { + + var u uint64 + var err error + + switch wire { + case WireVarint: + _, err = o.DecodeVarint() + case WireFixed64: + _, err = o.DecodeFixed64() + case WireBytes: + _, err = o.DecodeRawBytes(false) + case WireFixed32: + _, err = o.DecodeFixed32() + case WireStartGroup: + for { + u, err = o.DecodeVarint() + if err != nil { + break + } + fwire := int(u & 0x7) + if fwire == WireEndGroup { + break + } + ftag := int(u >> 3) + err = o.skip(t, ftag, fwire) + if err != nil { + break + } + } + default: + err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t) + } + return err +} + +// Unmarshaler is the interface representing objects that can +// unmarshal themselves. The method should reset the receiver before +// decoding starts. The argument points to data that may be +// overwritten, so implementations should not keep references to the +// buffer. +type Unmarshaler interface { + Unmarshal([]byte) error +} + +// Unmarshal parses the protocol buffer representation in buf and places the +// decoded result in pb. If the struct underlying pb does not match +// the data in buf, the results can be unpredictable. +// +// Unmarshal resets pb before starting to unmarshal, so any +// existing data in pb is always removed. Use UnmarshalMerge +// to preserve and append to existing data. +func Unmarshal(buf []byte, pb Message) error { + pb.Reset() + return UnmarshalMerge(buf, pb) +} + +// UnmarshalMerge parses the protocol buffer representation in buf and +// writes the decoded result to pb. If the struct underlying pb does not match +// the data in buf, the results can be unpredictable. +// +// UnmarshalMerge merges into existing data in pb. +// Most code should use Unmarshal instead. +func UnmarshalMerge(buf []byte, pb Message) error { + // If the object can unmarshal itself, let it. + if u, ok := pb.(Unmarshaler); ok { + return u.Unmarshal(buf) + } + return NewBuffer(buf).Unmarshal(pb) +} + +// DecodeMessage reads a count-delimited message from the Buffer. +func (p *Buffer) DecodeMessage(pb Message) error { + enc, err := p.DecodeRawBytes(false) + if err != nil { + return err + } + return NewBuffer(enc).Unmarshal(pb) +} + +// DecodeGroup reads a tag-delimited group from the Buffer. +func (p *Buffer) DecodeGroup(pb Message) error { + typ, base, err := getbase(pb) + if err != nil { + return err + } + return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base) +} + +// Unmarshal parses the protocol buffer representation in the +// Buffer and places the decoded result in pb. If the struct +// underlying pb does not match the data in the buffer, the results can be +// unpredictable. +// +// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. +func (p *Buffer) Unmarshal(pb Message) error { + // If the object can unmarshal itself, let it. + if u, ok := pb.(Unmarshaler); ok { + err := u.Unmarshal(p.buf[p.index:]) + p.index = len(p.buf) + return err + } + + typ, base, err := getbase(pb) + if err != nil { + return err + } + + err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base) + + if collectStats { + stats.Decode++ + } + + return err +} + +// unmarshalType does the work of unmarshaling a structure. 
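+// It reads tag/wire pairs and dispatches each field to its decoder, an
+// extension, a oneof unmarshaler, or the XXX_unrecognized buffer.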
+func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error { + var state errorState + required, reqFields := prop.reqCount, uint64(0) + + var err error + for err == nil && o.index < len(o.buf) { + oi := o.index + var u uint64 + u, err = o.DecodeVarint() + if err != nil { + break + } + wire := int(u & 0x7) + if wire == WireEndGroup { + if is_group { + if required > 0 { + // Not enough information to determine the exact field. + // (See below.) + return &RequiredNotSetError{"{Unknown}"} + } + return nil // input is satisfied + } + return fmt.Errorf("proto: %s: wiretype end group for non-group", st) + } + tag := int(u >> 3) + if tag <= 0 { + return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire) + } + fieldnum, ok := prop.decoderTags.get(tag) + if !ok { + // Maybe it's an extension? + if prop.extendable { + if e, eok := structPointer_Interface(base, st).(extensionsBytes); eok { + if isExtensionField(e, int32(tag)) { + if err = o.skip(st, tag, wire); err == nil { + ext := e.GetExtensions() + *ext = append(*ext, o.buf[oi:o.index]...) + } + continue + } + } else if e, _ := extendable(structPointer_Interface(base, st)); isExtensionField(e, int32(tag)) { + if err = o.skip(st, tag, wire); err == nil { + extmap := e.extensionsWrite() + ext := extmap[int32(tag)] // may be missing + ext.enc = append(ext.enc, o.buf[oi:o.index]...) + extmap[int32(tag)] = ext + } + continue + } + } + // Maybe it's a oneof? + if prop.oneofUnmarshaler != nil { + m := structPointer_Interface(base, st).(Message) + // First return value indicates whether tag is a oneof field. + ok, err = prop.oneofUnmarshaler(m, tag, wire, o) + if err == ErrInternalBadWireType { + // Map the error to something more descriptive. + // Do the formatting here to save generated code space. + err = fmt.Errorf("bad wiretype for oneof field in %T", m) + } + if ok { + continue + } + } + err = o.skipAndSave(st, tag, wire, base, prop.unrecField) + continue + } + p := prop.Prop[fieldnum] + + if p.dec == nil { + fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name) + continue + } + dec := p.dec + if wire != WireStartGroup && wire != p.WireType { + if wire == WireBytes && p.packedDec != nil { + // a packable field + dec = p.packedDec + } else { + err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType) + continue + } + } + decErr := dec(o, p, base) + if decErr != nil && !state.shouldContinue(decErr, p) { + err = decErr + } + if err == nil && p.Required { + // Successfully decoded a required field. + if tag <= 64 { + // use bitmap for fields 1-64 to catch field reuse. + var mask uint64 = 1 << uint64(tag-1) + if reqFields&mask == 0 { + // new required field + reqFields |= mask + required-- + } + } else { + // This is imprecise. It can be fooled by a required field + // with a tag > 64 that is encoded twice; that's very rare. + // A fully correct implementation would require allocating + // a data structure, which we would like to avoid. + required-- + } + } + } + if err == nil { + if is_group { + return io.ErrUnexpectedEOF + } + if state.err != nil { + return state.err + } + if required > 0 { + // Not enough information to determine the exact field. If we use extra + // CPU, we could determine the field only if the missing required field + // has a tag <= 64 and we check reqFields. 
+ return &RequiredNotSetError{"{Unknown}"} + } + } + return err +} + +// Individual type decoders +// For each, +// u is the decoded value, +// v is a pointer to the field (pointer) in the struct + +// Sizes of the pools to allocate inside the Buffer. +// The goal is modest amortization and allocation +// on at least 16-byte boundaries. +const ( + boolPoolSize = 16 + uint32PoolSize = 8 + uint64PoolSize = 4 +) + +// Decode a bool. +func (o *Buffer) dec_bool(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + if len(o.bools) == 0 { + o.bools = make([]bool, boolPoolSize) + } + o.bools[0] = u != 0 + *structPointer_Bool(base, p.field) = &o.bools[0] + o.bools = o.bools[1:] + return nil +} + +func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + *structPointer_BoolVal(base, p.field) = u != 0 + return nil +} + +// Decode an int32. +func (o *Buffer) dec_int32(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word32_Set(structPointer_Word32(base, p.field), o, uint32(u)) + return nil +} + +func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u)) + return nil +} + +// Decode an int64. +func (o *Buffer) dec_int64(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word64_Set(structPointer_Word64(base, p.field), o, u) + return nil +} + +func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word64Val_Set(structPointer_Word64Val(base, p.field), o, u) + return nil +} + +// Decode a string. +func (o *Buffer) dec_string(p *Properties, base structPointer) error { + s, err := o.DecodeStringBytes() + if err != nil { + return err + } + *structPointer_String(base, p.field) = &s + return nil +} + +func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error { + s, err := o.DecodeStringBytes() + if err != nil { + return err + } + *structPointer_StringVal(base, p.field) = s + return nil +} + +// Decode a slice of bytes ([]byte). +func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error { + b, err := o.DecodeRawBytes(true) + if err != nil { + return err + } + *structPointer_Bytes(base, p.field) = b + return nil +} + +// Decode a slice of bools ([]bool). +func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + v := structPointer_BoolSlice(base, p.field) + *v = append(*v, u != 0) + return nil +} + +// Decode a slice of bools ([]bool) in packed format. +func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error { + v := structPointer_BoolSlice(base, p.field) + + nn, err := o.DecodeVarint() + if err != nil { + return err + } + nb := int(nn) // number of bytes of encoded bools + fin := o.index + nb + if fin < o.index { + return errOverflow + } + + y := *v + for o.index < fin { + u, err := p.valDec(o) + if err != nil { + return err + } + y = append(y, u != 0) + } + + *v = y + return nil +} + +// Decode a slice of int32s ([]int32). 
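+// Each call appends a single decoded element; packed encodings are handled by
+// dec_slice_packed_int32 below.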
+func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + structPointer_Word32Slice(base, p.field).Append(uint32(u)) + return nil +} + +// Decode a slice of int32s ([]int32) in packed format. +func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error { + v := structPointer_Word32Slice(base, p.field) + + nn, err := o.DecodeVarint() + if err != nil { + return err + } + nb := int(nn) // number of bytes of encoded int32s + + fin := o.index + nb + if fin < o.index { + return errOverflow + } + for o.index < fin { + u, err := p.valDec(o) + if err != nil { + return err + } + v.Append(uint32(u)) + } + return nil +} + +// Decode a slice of int64s ([]int64). +func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + + structPointer_Word64Slice(base, p.field).Append(u) + return nil +} + +// Decode a slice of int64s ([]int64) in packed format. +func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error { + v := structPointer_Word64Slice(base, p.field) + + nn, err := o.DecodeVarint() + if err != nil { + return err + } + nb := int(nn) // number of bytes of encoded int64s + + fin := o.index + nb + if fin < o.index { + return errOverflow + } + for o.index < fin { + u, err := p.valDec(o) + if err != nil { + return err + } + v.Append(u) + } + return nil +} + +// Decode a slice of strings ([]string). +func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error { + s, err := o.DecodeStringBytes() + if err != nil { + return err + } + v := structPointer_StringSlice(base, p.field) + *v = append(*v, s) + return nil +} + +// Decode a slice of slice of bytes ([][]byte). +func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error { + b, err := o.DecodeRawBytes(true) + if err != nil { + return err + } + v := structPointer_BytesSlice(base, p.field) + *v = append(*v, b) + return nil +} + +// Decode a map field. +func (o *Buffer) dec_new_map(p *Properties, base structPointer) error { + raw, err := o.DecodeRawBytes(false) + if err != nil { + return err + } + oi := o.index // index at the end of this map entry + o.index -= len(raw) // move buffer back to start of map entry + + mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V + if mptr.Elem().IsNil() { + mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem())) + } + v := mptr.Elem() // map[K]V + + // Prepare addressable doubly-indirect placeholders for the key and value types. + // See enc_new_map for why. + keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K + keybase := toStructPointer(keyptr.Addr()) // **K + + var valbase structPointer + var valptr reflect.Value + switch p.mtype.Elem().Kind() { + case reflect.Slice: + // []byte + var dummy []byte + valptr = reflect.ValueOf(&dummy) // *[]byte + valbase = toStructPointer(valptr) // *[]byte + case reflect.Ptr: + // message; valptr is **Msg; need to allocate the intermediate pointer + valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V + valptr.Set(reflect.New(valptr.Type().Elem())) + valbase = toStructPointer(valptr) + default: + // everything else + valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V + valbase = toStructPointer(valptr.Addr()) // **V + } + + // Decode. + // This parses a restricted wire format, namely the encoding of a message + // with two fields. See enc_new_map for the format. 
+ for o.index < oi { + // tagcode for key and value properties are always a single byte + // because they have tags 1 and 2. + tagcode := o.buf[o.index] + o.index++ + switch tagcode { + case p.mkeyprop.tagcode[0]: + if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil { + return err + } + case p.mvalprop.tagcode[0]: + if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil { + return err + } + default: + // TODO: Should we silently skip this instead? + return fmt.Errorf("proto: bad map data tag %d", raw[0]) + } + } + keyelem, valelem := keyptr.Elem(), valptr.Elem() + if !keyelem.IsValid() { + keyelem = reflect.Zero(p.mtype.Key()) + } + if !valelem.IsValid() { + valelem = reflect.Zero(p.mtype.Elem()) + } + + v.SetMapIndex(keyelem, valelem) + return nil +} + +// Decode a group. +func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error { + bas := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(bas) { + // allocate new nested message + bas = toStructPointer(reflect.New(p.stype)) + structPointer_SetStructPointer(base, p.field, bas) + } + return o.unmarshalType(p.stype, p.sprop, true, bas) +} + +// Decode an embedded message. +func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) { + raw, e := o.DecodeRawBytes(false) + if e != nil { + return e + } + + bas := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(bas) { + // allocate new nested message + bas = toStructPointer(reflect.New(p.stype)) + structPointer_SetStructPointer(base, p.field, bas) + } + + // If the object can unmarshal itself, let it. + if p.isUnmarshaler { + iv := structPointer_Interface(bas, p.stype) + return iv.(Unmarshaler).Unmarshal(raw) + } + + obuf := o.buf + oi := o.index + o.buf = raw + o.index = 0 + + err = o.unmarshalType(p.stype, p.sprop, false, bas) + o.buf = obuf + o.index = oi + + return err +} + +// Decode a slice of embedded messages. +func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error { + return o.dec_slice_struct(p, false, base) +} + +// Decode a slice of embedded groups. +func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error { + return o.dec_slice_struct(p, true, base) +} + +// Decode a slice of structs ([]*struct). +func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error { + v := reflect.New(p.stype) + bas := toStructPointer(v) + structPointer_StructPointerSlice(base, p.field).Append(bas) + + if is_group { + err := o.unmarshalType(p.stype, p.sprop, is_group, bas) + return err + } + + raw, err := o.DecodeRawBytes(false) + if err != nil { + return err + } + + // If the object can unmarshal itself, let it. + if p.isUnmarshaler { + iv := v.Interface() + return iv.(Unmarshaler).Unmarshal(raw) + } + + obuf := o.buf + oi := o.index + o.buf = raw + o.index = 0 + + err = o.unmarshalType(p.stype, p.sprop, is_group, bas) + + o.buf = obuf + o.index = oi + + return err +} diff --git a/vendor/github.com/gogo/protobuf/proto/decode_gogo.go b/vendor/github.com/gogo/protobuf/proto/decode_gogo.go new file mode 100644 index 0000000..6fb74de --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/decode_gogo.go @@ -0,0 +1,172 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. 
+// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "reflect" +) + +// Decode a reference to a struct pointer. +func (o *Buffer) dec_ref_struct_message(p *Properties, base structPointer) (err error) { + raw, e := o.DecodeRawBytes(false) + if e != nil { + return e + } + + // If the object can unmarshal itself, let it. + if p.isUnmarshaler { + panic("not supported, since this is a pointer receiver") + } + + obuf := o.buf + oi := o.index + o.buf = raw + o.index = 0 + + bas := structPointer_FieldPointer(base, p.field) + + err = o.unmarshalType(p.stype, p.sprop, false, bas) + o.buf = obuf + o.index = oi + + return err +} + +// Decode a slice of references to struct pointers ([]struct). +func (o *Buffer) dec_slice_ref_struct(p *Properties, is_group bool, base structPointer) error { + newBas := appendStructPointer(base, p.field, p.sstype) + + if is_group { + panic("not supported, maybe in future, if requested.") + } + + raw, err := o.DecodeRawBytes(false) + if err != nil { + return err + } + + // If the object can unmarshal itself, let it. + if p.isUnmarshaler { + panic("not supported, since this is not a pointer receiver.") + } + + obuf := o.buf + oi := o.index + o.buf = raw + o.index = 0 + + err = o.unmarshalType(p.stype, p.sprop, is_group, newBas) + + o.buf = obuf + o.index = oi + + return err +} + +// Decode a slice of references to struct pointers. 
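+// The "ref" variants handle messages embedded by value rather than by pointer,
+// a gogoprotobuf extension.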
+func (o *Buffer) dec_slice_ref_struct_message(p *Properties, base structPointer) error { + return o.dec_slice_ref_struct(p, false, base) +} + +func setPtrCustomType(base structPointer, f field, v interface{}) { + if v == nil { + return + } + structPointer_SetStructPointer(base, f, toStructPointer(reflect.ValueOf(v))) +} + +func setCustomType(base structPointer, f field, value interface{}) { + if value == nil { + return + } + v := reflect.ValueOf(value).Elem() + t := reflect.TypeOf(value).Elem() + kind := t.Kind() + switch kind { + case reflect.Slice: + slice := reflect.MakeSlice(t, v.Len(), v.Cap()) + reflect.Copy(slice, v) + oldHeader := structPointer_GetSliceHeader(base, f) + oldHeader.Data = slice.Pointer() + oldHeader.Len = v.Len() + oldHeader.Cap = v.Cap() + default: + size := reflect.TypeOf(value).Elem().Size() + structPointer_Copy(toStructPointer(reflect.ValueOf(value)), structPointer_Add(base, f), int(size)) + } +} + +func (o *Buffer) dec_custom_bytes(p *Properties, base structPointer) error { + b, err := o.DecodeRawBytes(true) + if err != nil { + return err + } + i := reflect.New(p.ctype.Elem()).Interface() + custom := (i).(Unmarshaler) + if err := custom.Unmarshal(b); err != nil { + return err + } + setPtrCustomType(base, p.field, custom) + return nil +} + +func (o *Buffer) dec_custom_ref_bytes(p *Properties, base structPointer) error { + b, err := o.DecodeRawBytes(true) + if err != nil { + return err + } + i := reflect.New(p.ctype).Interface() + custom := (i).(Unmarshaler) + if err := custom.Unmarshal(b); err != nil { + return err + } + if custom != nil { + setCustomType(base, p.field, custom) + } + return nil +} + +// Decode a slice of bytes ([]byte) into a slice of custom types. +func (o *Buffer) dec_custom_slice_bytes(p *Properties, base structPointer) error { + b, err := o.DecodeRawBytes(true) + if err != nil { + return err + } + i := reflect.New(p.ctype.Elem()).Interface() + custom := (i).(Unmarshaler) + if err := custom.Unmarshal(b); err != nil { + return err + } + newBas := appendStructPointer(base, p.field, p.ctype) + + var zero field + setCustomType(newBas, zero, custom) + + return nil +} diff --git a/vendor/github.com/gogo/protobuf/proto/duration.go b/vendor/github.com/gogo/protobuf/proto/duration.go new file mode 100644 index 0000000..93464c9 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/duration.go @@ -0,0 +1,100 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// This file implements conversions between google.protobuf.Duration
+// and time.Duration.
+
+import (
+	"errors"
+	"fmt"
+	"time"
+)
+
+const (
+	// Range of a Duration in seconds, as specified in
+	// google/protobuf/duration.proto. This is about 10,000 years in seconds.
+	maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60)
+	minSeconds = -maxSeconds
+)
+
+// validateDuration determines whether the Duration is valid according to the
+// definition in google/protobuf/duration.proto. A valid Duration
+// may still be too large to fit into a time.Duration (the range of Duration
+// is about 10,000 years, while the range of time.Duration is about 290 years).
+func validateDuration(d *duration) error {
+	if d == nil {
+		return errors.New("duration: nil Duration")
+	}
+	if d.Seconds < minSeconds || d.Seconds > maxSeconds {
+		return fmt.Errorf("duration: %#v: seconds out of range", d)
+	}
+	if d.Nanos <= -1e9 || d.Nanos >= 1e9 {
+		return fmt.Errorf("duration: %#v: nanos out of range", d)
+	}
+	// Seconds and Nanos must have the same sign, unless d.Nanos is zero.
+	if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) {
+		return fmt.Errorf("duration: %#v: seconds and nanos have different signs", d)
+	}
+	return nil
+}
+
+// durationFromProto converts a Duration to a time.Duration. durationFromProto
+// returns an error if the Duration is invalid or is too large to be
+// represented in a time.Duration.
+func durationFromProto(p *duration) (time.Duration, error) {
+	if err := validateDuration(p); err != nil {
+		return 0, err
+	}
+	d := time.Duration(p.Seconds) * time.Second
+	if int64(d/time.Second) != p.Seconds {
+		return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p)
+	}
+	if p.Nanos != 0 {
+		d += time.Duration(p.Nanos)
+		if (d < 0) != (p.Nanos < 0) {
+			return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p)
+		}
+	}
+	return d, nil
+}
+
+// durationProto converts a time.Duration to a Duration.
+func durationProto(d time.Duration) *duration {
+	nanos := d.Nanoseconds()
+	secs := nanos / 1e9
+	nanos -= secs * 1e9
+	return &duration{
+		Seconds: secs,
+		Nanos:   int32(nanos),
+	}
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/duration_gogo.go b/vendor/github.com/gogo/protobuf/proto/duration_gogo.go
new file mode 100644
index 0000000..18e2a5f
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/duration_gogo.go
@@ -0,0 +1,203 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2016, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "reflect" + "time" +) + +var durationType = reflect.TypeOf((*time.Duration)(nil)).Elem() + +type duration struct { + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` +} + +func (m *duration) Reset() { *m = duration{} } +func (*duration) ProtoMessage() {} +func (*duration) String() string { return "duration" } + +func init() { + RegisterType((*duration)(nil), "gogo.protobuf.proto.duration") +} + +func (o *Buffer) decDuration() (time.Duration, error) { + b, err := o.DecodeRawBytes(true) + if err != nil { + return 0, err + } + dproto := &duration{} + if err := Unmarshal(b, dproto); err != nil { + return 0, err + } + return durationFromProto(dproto) +} + +func (o *Buffer) dec_duration(p *Properties, base structPointer) error { + d, err := o.decDuration() + if err != nil { + return err + } + word64_Set(structPointer_Word64(base, p.field), o, uint64(d)) + return nil +} + +func (o *Buffer) dec_ref_duration(p *Properties, base structPointer) error { + d, err := o.decDuration() + if err != nil { + return err + } + word64Val_Set(structPointer_Word64Val(base, p.field), o, uint64(d)) + return nil +} + +func (o *Buffer) dec_slice_duration(p *Properties, base structPointer) error { + d, err := o.decDuration() + if err != nil { + return err + } + newBas := appendStructPointer(base, p.field, reflect.SliceOf(reflect.PtrTo(durationType))) + var zero field + setPtrCustomType(newBas, zero, &d) + return nil +} + +func (o *Buffer) dec_slice_ref_duration(p *Properties, base structPointer) error { + d, err := o.decDuration() + if err != nil { + return err + } + structPointer_Word64Slice(base, p.field).Append(uint64(d)) + return nil +} + +func size_duration(p *Properties, base structPointer) (n int) { + structp := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return 0 + } + dur := structPointer_Interface(structp, durationType).(*time.Duration) + d := durationProto(*dur) + size := Size(d) + return size + sizeVarint(uint64(size)) + len(p.tagcode) +} + +func (o *Buffer) enc_duration(p *Properties, base structPointer) error { + structp := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return ErrNil + } + dur := structPointer_Interface(structp, durationType).(*time.Duration) + d := durationProto(*dur) + data, err := Marshal(d) + if err != nil { + return err + } + o.buf = append(o.buf, 
p.tagcode...) + o.EncodeRawBytes(data) + return nil +} + +func size_ref_duration(p *Properties, base structPointer) (n int) { + dur := structPointer_InterfaceAt(base, p.field, durationType).(*time.Duration) + d := durationProto(*dur) + size := Size(d) + return size + sizeVarint(uint64(size)) + len(p.tagcode) +} + +func (o *Buffer) enc_ref_duration(p *Properties, base structPointer) error { + dur := structPointer_InterfaceAt(base, p.field, durationType).(*time.Duration) + d := durationProto(*dur) + data, err := Marshal(d) + if err != nil { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + return nil +} + +func size_slice_duration(p *Properties, base structPointer) (n int) { + pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(durationType))).(*[]*time.Duration) + durs := *pdurs + for i := 0; i < len(durs); i++ { + if durs[i] == nil { + return 0 + } + dproto := durationProto(*durs[i]) + size := Size(dproto) + n += len(p.tagcode) + size + sizeVarint(uint64(size)) + } + return n +} + +func (o *Buffer) enc_slice_duration(p *Properties, base structPointer) error { + pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(durationType))).(*[]*time.Duration) + durs := *pdurs + for i := 0; i < len(durs); i++ { + if durs[i] == nil { + return errRepeatedHasNil + } + dproto := durationProto(*durs[i]) + data, err := Marshal(dproto) + if err != nil { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + } + return nil +} + +func size_slice_ref_duration(p *Properties, base structPointer) (n int) { + pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(durationType)).(*[]time.Duration) + durs := *pdurs + for i := 0; i < len(durs); i++ { + dproto := durationProto(durs[i]) + size := Size(dproto) + n += len(p.tagcode) + size + sizeVarint(uint64(size)) + } + return n +} + +func (o *Buffer) enc_slice_ref_duration(p *Properties, base structPointer) error { + pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(durationType)).(*[]time.Duration) + durs := *pdurs + for i := 0; i < len(durs); i++ { + dproto := durationProto(durs[i]) + data, err := Marshal(dproto) + if err != nil { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + } + return nil +} diff --git a/vendor/github.com/gogo/protobuf/proto/encode.go b/vendor/github.com/gogo/protobuf/proto/encode.go new file mode 100644 index 0000000..8b84d1b --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/encode.go @@ -0,0 +1,1362 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for encoding data into the wire format for protocol buffers. + */ + +import ( + "errors" + "fmt" + "reflect" + "sort" +) + +// RequiredNotSetError is the error returned if Marshal is called with +// a protocol buffer struct whose required fields have not +// all been initialized. It is also the error returned if Unmarshal is +// called with an encoded protocol buffer that does not include all the +// required fields. +// +// When printed, RequiredNotSetError reports the first unset required field in a +// message. If the field cannot be precisely determined, it is reported as +// "{Unknown}". +type RequiredNotSetError struct { + field string +} + +func (e *RequiredNotSetError) Error() string { + return fmt.Sprintf("proto: required field %q not set", e.field) +} + +var ( + // errRepeatedHasNil is the error returned if Marshal is called with + // a struct with a repeated field containing a nil element. + errRepeatedHasNil = errors.New("proto: repeated field has nil element") + + // errOneofHasNil is the error returned if Marshal is called with + // a struct with a oneof field containing a nil element. + errOneofHasNil = errors.New("proto: oneof field has nil value") + + // ErrNil is the error returned if Marshal is called with nil. + ErrNil = errors.New("proto: Marshal called with nil") + + // ErrTooLarge is the error returned if Marshal is called with a + // message that encodes to >2GB. + ErrTooLarge = errors.New("proto: message encodes to over 2 GB") +) + +// The fundamental encoders that put bytes on the wire. +// Those that take integer types all accept uint64 and are +// therefore of type valueEncoder. + +const maxVarintBytes = 10 // maximum length of a varint + +// maxMarshalSize is the largest allowed size of an encoded protobuf, +// since C++ and Java use signed int32s for the size. +const maxMarshalSize = 1<<31 - 1 + +// EncodeVarint returns the varint encoding of x. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +// Not used by the package itself, but helpful to clients +// wishing to use the same encoding. +func EncodeVarint(x uint64) []byte { + var buf [maxVarintBytes]byte + var n int + for n = 0; x > 127; n++ { + buf[n] = 0x80 | uint8(x&0x7F) + x >>= 7 + } + buf[n] = uint8(x) + n++ + return buf[0:n] +} + +// EncodeVarint writes a varint-encoded integer to the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. 
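+// Each output byte carries seven value bits; the high bit marks continuation.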
+func (p *Buffer) EncodeVarint(x uint64) error { + for x >= 1<<7 { + p.buf = append(p.buf, uint8(x&0x7f|0x80)) + x >>= 7 + } + p.buf = append(p.buf, uint8(x)) + return nil +} + +// SizeVarint returns the varint encoding size of an integer. +func SizeVarint(x uint64) int { + return sizeVarint(x) +} + +func sizeVarint(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} + +// EncodeFixed64 writes a 64-bit integer to the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (p *Buffer) EncodeFixed64(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24), + uint8(x>>32), + uint8(x>>40), + uint8(x>>48), + uint8(x>>56)) + return nil +} + +func sizeFixed64(x uint64) int { + return 8 +} + +// EncodeFixed32 writes a 32-bit integer to the Buffer. +// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. +func (p *Buffer) EncodeFixed32(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24)) + return nil +} + +func sizeFixed32(x uint64) int { + return 4 +} + +// EncodeZigzag64 writes a zigzag-encoded 64-bit integer +// to the Buffer. +// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) EncodeZigzag64(x uint64) error { + // use signed number to get arithmetic right shift. + return p.EncodeVarint((x << 1) ^ uint64((int64(x) >> 63))) +} + +func sizeZigzag64(x uint64) int { + return sizeVarint((x << 1) ^ uint64((int64(x) >> 63))) +} + +// EncodeZigzag32 writes a zigzag-encoded 32-bit integer +// to the Buffer. +// This is the format used for the sint32 protocol buffer type. +func (p *Buffer) EncodeZigzag32(x uint64) error { + // use signed number to get arithmetic right shift. + return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) +} + +func sizeZigzag32(x uint64) int { + return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) +} + +// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. +func (p *Buffer) EncodeRawBytes(b []byte) error { + p.EncodeVarint(uint64(len(b))) + p.buf = append(p.buf, b...) + return nil +} + +func sizeRawBytes(b []byte) int { + return sizeVarint(uint64(len(b))) + + len(b) +} + +// EncodeStringBytes writes an encoded string to the Buffer. +// This is the format used for the proto2 string type. +func (p *Buffer) EncodeStringBytes(s string) error { + p.EncodeVarint(uint64(len(s))) + p.buf = append(p.buf, s...) + return nil +} + +func sizeStringBytes(s string) int { + return sizeVarint(uint64(len(s))) + + len(s) +} + +// Marshaler is the interface representing objects that can marshal themselves. +type Marshaler interface { + Marshal() ([]byte, error) +} + +// Marshal takes the protocol buffer +// and encodes it into the wire format, returning the data. +func Marshal(pb Message) ([]byte, error) { + // Can the object marshal itself? + if m, ok := pb.(Marshaler); ok { + return m.Marshal() + } + p := NewBuffer(nil) + err := p.Marshal(pb) + if p.buf == nil && err == nil { + // Return a non-nil slice on success. + return []byte{}, nil + } + return p.buf, err +} + +// EncodeMessage writes the protocol buffer to the Buffer, +// prefixed by a varint-encoded length. 
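+// The counterpart for reading is (*Buffer).DecodeMessage.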
+func (p *Buffer) EncodeMessage(pb Message) error { + t, base, err := getbase(pb) + if structPointer_IsNil(base) { + return ErrNil + } + if err == nil { + var state errorState + err = p.enc_len_struct(GetProperties(t.Elem()), base, &state) + } + return err +} + +// Marshal takes the protocol buffer +// and encodes it into the wire format, writing the result to the +// Buffer. +func (p *Buffer) Marshal(pb Message) error { + // Can the object marshal itself? + if m, ok := pb.(Marshaler); ok { + data, err := m.Marshal() + p.buf = append(p.buf, data...) + return err + } + + t, base, err := getbase(pb) + if structPointer_IsNil(base) { + return ErrNil + } + if err == nil { + err = p.enc_struct(GetProperties(t.Elem()), base) + } + + if collectStats { + (stats).Encode++ // Parens are to work around a goimports bug. + } + + if len(p.buf) > maxMarshalSize { + return ErrTooLarge + } + return err +} + +// Size returns the encoded size of a protocol buffer. +func Size(pb Message) (n int) { + // Can the object marshal itself? If so, Size is slow. + // TODO: add Size to Marshaler, or add a Sizer interface. + if m, ok := pb.(Marshaler); ok { + b, _ := m.Marshal() + return len(b) + } + + t, base, err := getbase(pb) + if structPointer_IsNil(base) { + return 0 + } + if err == nil { + n = size_struct(GetProperties(t.Elem()), base) + } + + if collectStats { + (stats).Size++ // Parens are to work around a goimports bug. + } + + return +} + +// Individual type encoders. + +// Encode a bool. +func (o *Buffer) enc_bool(p *Properties, base structPointer) error { + v := *structPointer_Bool(base, p.field) + if v == nil { + return ErrNil + } + x := 0 + if *v { + x = 1 + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error { + v := *structPointer_BoolVal(base, p.field) + if !v { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, 1) + return nil +} + +func size_bool(p *Properties, base structPointer) int { + v := *structPointer_Bool(base, p.field) + if v == nil { + return 0 + } + return len(p.tagcode) + 1 // each bool takes exactly one byte +} + +func size_proto3_bool(p *Properties, base structPointer) int { + v := *structPointer_BoolVal(base, p.field) + if !v && !p.oneof { + return 0 + } + return len(p.tagcode) + 1 // each bool takes exactly one byte +} + +// Encode an int32. +func (o *Buffer) enc_int32(p *Properties, base structPointer) error { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return ErrNil + } + x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error { + v := structPointer_Word32Val(base, p.field) + x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range + if x == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) 
+ p.valEnc(o, uint64(x)) + return nil +} + +func size_int32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return 0 + } + x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +func size_proto3_int32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32Val(base, p.field) + x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range + if x == 0 && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +// Encode a uint32. +// Exactly the same as int32, except for no sign extension. +func (o *Buffer) enc_uint32(p *Properties, base structPointer) error { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return ErrNil + } + x := word32_Get(v) + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error { + v := structPointer_Word32Val(base, p.field) + x := word32Val_Get(v) + if x == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func size_uint32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return 0 + } + x := word32_Get(v) + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +func size_proto3_uint32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32Val(base, p.field) + x := word32Val_Get(v) + if x == 0 && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +// Encode an int64. +func (o *Buffer) enc_int64(p *Properties, base structPointer) error { + v := structPointer_Word64(base, p.field) + if word64_IsNil(v) { + return ErrNil + } + x := word64_Get(v) + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, x) + return nil +} + +func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error { + v := structPointer_Word64Val(base, p.field) + x := word64Val_Get(v) + if x == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, x) + return nil +} + +func size_int64(p *Properties, base structPointer) (n int) { + v := structPointer_Word64(base, p.field) + if word64_IsNil(v) { + return 0 + } + x := word64_Get(v) + n += len(p.tagcode) + n += p.valSize(x) + return +} + +func size_proto3_int64(p *Properties, base structPointer) (n int) { + v := structPointer_Word64Val(base, p.field) + x := word64Val_Get(v) + if x == 0 && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += p.valSize(x) + return +} + +// Encode a string. +func (o *Buffer) enc_string(p *Properties, base structPointer) error { + v := *structPointer_String(base, p.field) + if v == nil { + return ErrNil + } + x := *v + o.buf = append(o.buf, p.tagcode...) + o.EncodeStringBytes(x) + return nil +} + +func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error { + v := *structPointer_StringVal(base, p.field) + if v == "" { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) 
+ o.EncodeStringBytes(v) + return nil +} + +func size_string(p *Properties, base structPointer) (n int) { + v := *structPointer_String(base, p.field) + if v == nil { + return 0 + } + x := *v + n += len(p.tagcode) + n += sizeStringBytes(x) + return +} + +func size_proto3_string(p *Properties, base structPointer) (n int) { + v := *structPointer_StringVal(base, p.field) + if v == "" && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += sizeStringBytes(v) + return +} + +// All protocol buffer fields are nillable, but be careful. +func isNil(v reflect.Value) bool { + switch v.Kind() { + case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return v.IsNil() + } + return false +} + +// Encode a message struct. +func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error { + var state errorState + structp := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return ErrNil + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, err := m.Marshal() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + return state.err + } + + o.buf = append(o.buf, p.tagcode...) + return o.enc_len_struct(p.sprop, structp, &state) +} + +func size_struct_message(p *Properties, base structPointer) int { + structp := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return 0 + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, _ := m.Marshal() + n0 := len(p.tagcode) + n1 := sizeRawBytes(data) + return n0 + n1 + } + + n0 := len(p.tagcode) + n1 := size_struct(p.sprop, structp) + n2 := sizeVarint(uint64(n1)) // size of encoded length + return n0 + n1 + n2 +} + +// Encode a group struct. +func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error { + var state errorState + b := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(b) { + return ErrNil + } + + o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) + err := o.enc_struct(p.sprop, b) + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) + return state.err +} + +func size_struct_group(p *Properties, base structPointer) (n int) { + b := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(b) { + return 0 + } + + n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup)) + n += size_struct(p.sprop, b) + n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup)) + return +} + +// Encode a slice of bools ([]bool). +func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return ErrNil + } + for _, x := range s { + o.buf = append(o.buf, p.tagcode...) + v := uint64(0) + if x { + v = 1 + } + p.valEnc(o, v) + } + return nil +} + +func size_slice_bool(p *Properties, base structPointer) int { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return 0 + } + return l * (len(p.tagcode) + 1) // each bool takes exactly one byte +} + +// Encode a slice of bools ([]bool) in packed format. +func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) 
+ o.EncodeVarint(uint64(l)) // each bool takes exactly one byte + for _, x := range s { + v := uint64(0) + if x { + v = 1 + } + p.valEnc(o, v) + } + return nil +} + +func size_slice_packed_bool(p *Properties, base structPointer) (n int) { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return 0 + } + n += len(p.tagcode) + n += sizeVarint(uint64(l)) + n += l // each bool takes exactly one byte + return +} + +// Encode a slice of bytes ([]byte). +func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error { + s := *structPointer_Bytes(base, p.field) + if s == nil { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(s) + return nil +} + +func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error { + s := *structPointer_Bytes(base, p.field) + if len(s) == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(s) + return nil +} + +func size_slice_byte(p *Properties, base structPointer) (n int) { + s := *structPointer_Bytes(base, p.field) + if s == nil && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += sizeRawBytes(s) + return +} + +func size_proto3_slice_byte(p *Properties, base structPointer) (n int) { + s := *structPointer_Bytes(base, p.field) + if len(s) == 0 && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += sizeRawBytes(s) + return +} + +// Encode a slice of int32s ([]int32). +func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + p.valEnc(o, uint64(x)) + } + return nil +} + +func size_slice_int32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + for i := 0; i < l; i++ { + n += len(p.tagcode) + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + n += p.valSize(uint64(x)) + } + return +} + +// Encode a slice of int32s ([]int32) in packed format. +func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + // TODO: Reuse a Buffer. + buf := NewBuffer(nil) + for i := 0; i < l; i++ { + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + p.valEnc(buf, uint64(x)) + } + + o.buf = append(o.buf, p.tagcode...) + o.EncodeVarint(uint64(len(buf.buf))) + o.buf = append(o.buf, buf.buf...) + return nil +} + +func size_slice_packed_int32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + var bufSize int + for i := 0; i < l; i++ { + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + bufSize += p.valSize(uint64(x)) + } + + n += len(p.tagcode) + n += sizeVarint(uint64(bufSize)) + n += bufSize + return +} + +// Encode a slice of uint32s ([]uint32). +// Exactly the same as int32, except for no sign extension. +func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) 
+ x := s.Index(i) + p.valEnc(o, uint64(x)) + } + return nil +} + +func size_slice_uint32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + for i := 0; i < l; i++ { + n += len(p.tagcode) + x := s.Index(i) + n += p.valSize(uint64(x)) + } + return +} + +// Encode a slice of uint32s ([]uint32) in packed format. +// Exactly the same as int32, except for no sign extension. +func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + // TODO: Reuse a Buffer. + buf := NewBuffer(nil) + for i := 0; i < l; i++ { + p.valEnc(buf, uint64(s.Index(i))) + } + + o.buf = append(o.buf, p.tagcode...) + o.EncodeVarint(uint64(len(buf.buf))) + o.buf = append(o.buf, buf.buf...) + return nil +} + +func size_slice_packed_uint32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + var bufSize int + for i := 0; i < l; i++ { + bufSize += p.valSize(uint64(s.Index(i))) + } + + n += len(p.tagcode) + n += sizeVarint(uint64(bufSize)) + n += bufSize + return +} + +// Encode a slice of int64s ([]int64). +func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, s.Index(i)) + } + return nil +} + +func size_slice_int64(p *Properties, base structPointer) (n int) { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + for i := 0; i < l; i++ { + n += len(p.tagcode) + n += p.valSize(s.Index(i)) + } + return +} + +// Encode a slice of int64s ([]int64) in packed format. +func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + // TODO: Reuse a Buffer. + buf := NewBuffer(nil) + for i := 0; i < l; i++ { + p.valEnc(buf, s.Index(i)) + } + + o.buf = append(o.buf, p.tagcode...) + o.EncodeVarint(uint64(len(buf.buf))) + o.buf = append(o.buf, buf.buf...) + return nil +} + +func size_slice_packed_int64(p *Properties, base structPointer) (n int) { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + var bufSize int + for i := 0; i < l; i++ { + bufSize += p.valSize(s.Index(i)) + } + + n += len(p.tagcode) + n += sizeVarint(uint64(bufSize)) + n += bufSize + return +} + +// Encode a slice of slice of bytes ([][]byte). +func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error { + ss := *structPointer_BytesSlice(base, p.field) + l := len(ss) + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(ss[i]) + } + return nil +} + +func size_slice_slice_byte(p *Properties, base structPointer) (n int) { + ss := *structPointer_BytesSlice(base, p.field) + l := len(ss) + if l == 0 { + return 0 + } + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + n += sizeRawBytes(ss[i]) + } + return +} + +// Encode a slice of strings ([]string). +func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error { + ss := *structPointer_StringSlice(base, p.field) + l := len(ss) + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) 
+ o.EncodeStringBytes(ss[i]) + } + return nil +} + +func size_slice_string(p *Properties, base structPointer) (n int) { + ss := *structPointer_StringSlice(base, p.field) + l := len(ss) + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + n += sizeStringBytes(ss[i]) + } + return +} + +// Encode a slice of message structs ([]*struct). +func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error { + var state errorState + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + + for i := 0; i < l; i++ { + structp := s.Index(i) + if structPointer_IsNil(structp) { + return errRepeatedHasNil + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, err := m.Marshal() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + continue + } + + o.buf = append(o.buf, p.tagcode...) + err := o.enc_len_struct(p.sprop, structp, &state) + if err != nil && !state.shouldContinue(err, nil) { + if err == ErrNil { + return errRepeatedHasNil + } + return err + } + } + return state.err +} + +func size_slice_struct_message(p *Properties, base structPointer) (n int) { + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + structp := s.Index(i) + if structPointer_IsNil(structp) { + return // return the size up to this point + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, _ := m.Marshal() + n += sizeRawBytes(data) + continue + } + + n0 := size_struct(p.sprop, structp) + n1 := sizeVarint(uint64(n0)) // size of encoded length + n += n0 + n1 + } + return +} + +// Encode a slice of group structs ([]*struct). +func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error { + var state errorState + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + + for i := 0; i < l; i++ { + b := s.Index(i) + if structPointer_IsNil(b) { + return errRepeatedHasNil + } + + o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) + + err := o.enc_struct(p.sprop, b) + + if err != nil && !state.shouldContinue(err, nil) { + if err == ErrNil { + return errRepeatedHasNil + } + return err + } + + o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) + } + return state.err +} + +func size_slice_struct_group(p *Properties, base structPointer) (n int) { + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + + n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup)) + n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup)) + for i := 0; i < l; i++ { + b := s.Index(i) + if structPointer_IsNil(b) { + return // return size up to this point + } + + n += size_struct(p.sprop, b) + } + return +} + +// Encode an extension map. 
+func (o *Buffer) enc_map(p *Properties, base structPointer) error { + exts := structPointer_ExtMap(base, p.field) + if err := encodeExtensionsMap(*exts); err != nil { + return err + } + + return o.enc_map_body(*exts) +} + +func (o *Buffer) enc_exts(p *Properties, base structPointer) error { + exts := structPointer_Extensions(base, p.field) + + v, mu := exts.extensionsRead() + if v == nil { + return nil + } + + mu.Lock() + defer mu.Unlock() + if err := encodeExtensionsMap(v); err != nil { + return err + } + + return o.enc_map_body(v) +} + +func (o *Buffer) enc_map_body(v map[int32]Extension) error { + // Fast-path for common cases: zero or one extensions. + if len(v) <= 1 { + for _, e := range v { + o.buf = append(o.buf, e.enc...) + } + return nil + } + + // Sort keys to provide a deterministic encoding. + keys := make([]int, 0, len(v)) + for k := range v { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, k := range keys { + o.buf = append(o.buf, v[int32(k)].enc...) + } + return nil +} + +func size_map(p *Properties, base structPointer) int { + v := structPointer_ExtMap(base, p.field) + return extensionsMapSize(*v) +} + +func size_exts(p *Properties, base structPointer) int { + v := structPointer_Extensions(base, p.field) + return extensionsSize(v) +} + +// Encode a map field. +func (o *Buffer) enc_new_map(p *Properties, base structPointer) error { + var state errorState // XXX: or do we need to plumb this through? + + /* + A map defined as + map map_field = N; + is encoded in the same way as + message MapFieldEntry { + key_type key = 1; + value_type value = 2; + } + repeated MapFieldEntry map_field = N; + */ + + v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V + if v.Len() == 0 { + return nil + } + + keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) + + enc := func() error { + if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil { + return err + } + if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil && err != ErrNil { + return err + } + return nil + } + + // Don't sort map keys. It is not required by the spec, and C++ doesn't do it. + for _, key := range v.MapKeys() { + val := v.MapIndex(key) + + keycopy.Set(key) + valcopy.Set(val) + + o.buf = append(o.buf, p.tagcode...) + if err := o.enc_len_thing(enc, &state); err != nil { + return err + } + } + return nil +} + +func size_new_map(p *Properties, base structPointer) int { + v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V + + keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) + + n := 0 + for _, key := range v.MapKeys() { + val := v.MapIndex(key) + keycopy.Set(key) + valcopy.Set(val) + + // Tag codes for key and val are the responsibility of the sub-sizer. + keysize := p.mkeyprop.size(p.mkeyprop, keybase) + valsize := p.mvalprop.size(p.mvalprop, valbase) + entry := keysize + valsize + // Add on tag code and length of map entry itself. + n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry + } + return n +} + +// mapEncodeScratch returns a new reflect.Value matching the map's value type, +// and a structPointer suitable for passing to an encoder or sizer. +func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) { + // Prepare addressable doubly-indirect placeholders for the key and value types. + // This is needed because the element-type encoders expect **T, but the map iteration produces T. 
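+	// Concretely: for a key type K, allocate an addressable K, wrap it in an
+	// addressable *K, and pass the address of that *K (a **K) as keybase.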
+ + keycopy = reflect.New(mapType.Key()).Elem() // addressable K + keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K + keyptr.Set(keycopy.Addr()) // + keybase = toStructPointer(keyptr.Addr()) // **K + + // Value types are more varied and require special handling. + switch mapType.Elem().Kind() { + case reflect.Slice: + // []byte + var dummy []byte + valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte + valbase = toStructPointer(valcopy.Addr()) + case reflect.Ptr: + // message; the generated field type is map[K]*Msg (so V is *Msg), + // so we only need one level of indirection. + valcopy = reflect.New(mapType.Elem()).Elem() // addressable V + valbase = toStructPointer(valcopy.Addr()) + default: + // everything else + valcopy = reflect.New(mapType.Elem()).Elem() // addressable V + valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V + valptr.Set(valcopy.Addr()) // + valbase = toStructPointer(valptr.Addr()) // **V + } + return +} + +// Encode a struct. +func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error { + var state errorState + // Encode fields in tag order so that decoders may use optimizations + // that depend on the ordering. + // https://developers.google.com/protocol-buffers/docs/encoding#order + for _, i := range prop.order { + p := prop.Prop[i] + if p.enc != nil { + err := p.enc(o, p, base) + if err != nil { + if err == ErrNil { + if p.Required && state.err == nil { + state.err = &RequiredNotSetError{p.Name} + } + } else if err == errRepeatedHasNil { + // Give more context to nil values in repeated fields. + return errors.New("repeated field " + p.OrigName + " has nil element") + } else if !state.shouldContinue(err, p) { + return err + } + } + if len(o.buf) > maxMarshalSize { + return ErrTooLarge + } + } + } + + // Do oneof fields. + if prop.oneofMarshaler != nil { + m := structPointer_Interface(base, prop.stype).(Message) + if err := prop.oneofMarshaler(m, o); err == ErrNil { + return errOneofHasNil + } else if err != nil { + return err + } + } + + // Add unrecognized fields at the end. + if prop.unrecField.IsValid() { + v := *structPointer_Bytes(base, prop.unrecField) + if len(o.buf)+len(v) > maxMarshalSize { + return ErrTooLarge + } + if len(v) > 0 { + o.buf = append(o.buf, v...) + } + } + + return state.err +} + +func size_struct(prop *StructProperties, base structPointer) (n int) { + for _, i := range prop.order { + p := prop.Prop[i] + if p.size != nil { + n += p.size(p, base) + } + } + + // Add unrecognized fields at the end. + if prop.unrecField.IsValid() { + v := *structPointer_Bytes(base, prop.unrecField) + n += len(v) + } + + // Factor in any oneof fields. + if prop.oneofSizer != nil { + m := structPointer_Interface(base, prop.stype).(Message) + n += prop.oneofSizer(m) + } + + return +} + +var zeroes [20]byte // longer than any conceivable sizeVarint + +// Encode a struct, preceded by its encoded length (as a varint). +func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error { + return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state) +} + +// Encode something, preceded by its encoded length (as a varint). 
+func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error { + iLen := len(o.buf) + o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length + iMsg := len(o.buf) + err := enc() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + lMsg := len(o.buf) - iMsg + lLen := sizeVarint(uint64(lMsg)) + switch x := lLen - (iMsg - iLen); { + case x > 0: // actual length is x bytes larger than the space we reserved + // Move msg x bytes right. + o.buf = append(o.buf, zeroes[:x]...) + copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) + case x < 0: // actual length is x bytes smaller than the space we reserved + // Move msg x bytes left. + copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) + o.buf = o.buf[:len(o.buf)+x] // x is negative + } + // Encode the length in the reserved space. + o.buf = o.buf[:iLen] + o.EncodeVarint(uint64(lMsg)) + o.buf = o.buf[:len(o.buf)+lMsg] + return state.err +} + +// errorState maintains the first error that occurs and updates that error +// with additional context. +type errorState struct { + err error +} + +// shouldContinue reports whether encoding should continue upon encountering the +// given error. If the error is RequiredNotSetError, shouldContinue returns true +// and, if this is the first appearance of that error, remembers it for future +// reporting. +// +// If prop is not nil, it may update any error with additional context about the +// field with the error. +func (s *errorState) shouldContinue(err error, prop *Properties) bool { + // Ignore unset required fields. + reqNotSet, ok := err.(*RequiredNotSetError) + if !ok { + return false + } + if s.err == nil { + if prop != nil { + err = &RequiredNotSetError{prop.Name + "." + reqNotSet.field} + } + s.err = err + } + return true +} diff --git a/vendor/github.com/gogo/protobuf/proto/encode_gogo.go b/vendor/github.com/gogo/protobuf/proto/encode_gogo.go new file mode 100644 index 0000000..32111b7 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/encode_gogo.go @@ -0,0 +1,350 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// http://github.com/golang/protobuf/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "reflect" +) + +func NewRequiredNotSetError(field string) *RequiredNotSetError { + return &RequiredNotSetError{field} +} + +type Sizer interface { + Size() int +} + +func (o *Buffer) enc_ext_slice_byte(p *Properties, base structPointer) error { + s := *structPointer_Bytes(base, p.field) + if s == nil { + return ErrNil + } + o.buf = append(o.buf, s...) + return nil +} + +func size_ext_slice_byte(p *Properties, base structPointer) (n int) { + s := *structPointer_Bytes(base, p.field) + if s == nil { + return 0 + } + n += len(s) + return +} + +// Encode a reference to bool pointer. +func (o *Buffer) enc_ref_bool(p *Properties, base structPointer) error { + v := *structPointer_BoolVal(base, p.field) + x := 0 + if v { + x = 1 + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func size_ref_bool(p *Properties, base structPointer) int { + return len(p.tagcode) + 1 // each bool takes exactly one byte +} + +// Encode a reference to int32 pointer. +func (o *Buffer) enc_ref_int32(p *Properties, base structPointer) error { + v := structPointer_Word32Val(base, p.field) + x := int32(word32Val_Get(v)) + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func size_ref_int32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32Val(base, p.field) + x := int32(word32Val_Get(v)) + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +func (o *Buffer) enc_ref_uint32(p *Properties, base structPointer) error { + v := structPointer_Word32Val(base, p.field) + x := word32Val_Get(v) + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func size_ref_uint32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32Val(base, p.field) + x := word32Val_Get(v) + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +// Encode a reference to an int64 pointer. +func (o *Buffer) enc_ref_int64(p *Properties, base structPointer) error { + v := structPointer_Word64Val(base, p.field) + x := word64Val_Get(v) + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, x) + return nil +} + +func size_ref_int64(p *Properties, base structPointer) (n int) { + v := structPointer_Word64Val(base, p.field) + x := word64Val_Get(v) + n += len(p.tagcode) + n += p.valSize(x) + return +} + +// Encode a reference to a string pointer. +func (o *Buffer) enc_ref_string(p *Properties, base structPointer) error { + v := *structPointer_StringVal(base, p.field) + o.buf = append(o.buf, p.tagcode...) + o.EncodeStringBytes(v) + return nil +} + +func size_ref_string(p *Properties, base structPointer) (n int) { + v := *structPointer_StringVal(base, p.field) + n += len(p.tagcode) + n += sizeStringBytes(v) + return +} + +// Encode a reference to a message struct. 
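+// Like the other "ref" encoders in this file, the functions below handle
+// gogoprotobuf's by-value (non-nullable) fields; enc_ref_struct_message
+// deliberately mirrors enc_struct_message in encode.go.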
+func (o *Buffer) enc_ref_struct_message(p *Properties, base structPointer) error { + var state errorState + structp := structPointer_GetRefStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return ErrNil + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, err := m.Marshal() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + return nil + } + + o.buf = append(o.buf, p.tagcode...) + return o.enc_len_struct(p.sprop, structp, &state) +} + +//TODO this is only copied, please fix this +func size_ref_struct_message(p *Properties, base structPointer) int { + structp := structPointer_GetRefStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return 0 + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, _ := m.Marshal() + n0 := len(p.tagcode) + n1 := sizeRawBytes(data) + return n0 + n1 + } + + n0 := len(p.tagcode) + n1 := size_struct(p.sprop, structp) + n2 := sizeVarint(uint64(n1)) // size of encoded length + return n0 + n1 + n2 +} + +// Encode a slice of references to message struct pointers ([]struct). +func (o *Buffer) enc_slice_ref_struct_message(p *Properties, base structPointer) error { + var state errorState + ss := structPointer_StructRefSlice(base, p.field, p.stype.Size()) + l := ss.Len() + for i := 0; i < l; i++ { + structp := ss.Index(i) + if structPointer_IsNil(structp) { + return errRepeatedHasNil + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, err := m.Marshal() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + continue + } + + o.buf = append(o.buf, p.tagcode...) + err := o.enc_len_struct(p.sprop, structp, &state) + if err != nil && !state.shouldContinue(err, nil) { + if err == ErrNil { + return errRepeatedHasNil + } + return err + } + + } + return state.err +} + +//TODO this is only copied, please fix this +func size_slice_ref_struct_message(p *Properties, base structPointer) (n int) { + ss := structPointer_StructRefSlice(base, p.field, p.stype.Size()) + l := ss.Len() + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + structp := ss.Index(i) + if structPointer_IsNil(structp) { + return // return the size up to this point + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, _ := m.Marshal() + n += len(p.tagcode) + n += sizeRawBytes(data) + continue + } + + n0 := size_struct(p.sprop, structp) + n1 := sizeVarint(uint64(n0)) // size of encoded length + n += n0 + n1 + } + return +} + +func (o *Buffer) enc_custom_bytes(p *Properties, base structPointer) error { + i := structPointer_InterfaceRef(base, p.field, p.ctype) + if i == nil { + return ErrNil + } + custom := i.(Marshaler) + data, err := custom.Marshal() + if err != nil { + return err + } + if data == nil { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) 
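+	// Custom types supply their own wire bytes via Marshaler; the result is
+	// framed like a bytes field (tag, length, payload).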
+ o.EncodeRawBytes(data) + return nil +} + +func size_custom_bytes(p *Properties, base structPointer) (n int) { + n += len(p.tagcode) + i := structPointer_InterfaceRef(base, p.field, p.ctype) + if i == nil { + return 0 + } + custom := i.(Marshaler) + data, _ := custom.Marshal() + n += sizeRawBytes(data) + return +} + +func (o *Buffer) enc_custom_ref_bytes(p *Properties, base structPointer) error { + custom := structPointer_InterfaceAt(base, p.field, p.ctype).(Marshaler) + data, err := custom.Marshal() + if err != nil { + return err + } + if data == nil { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + return nil +} + +func size_custom_ref_bytes(p *Properties, base structPointer) (n int) { + n += len(p.tagcode) + i := structPointer_InterfaceAt(base, p.field, p.ctype) + if i == nil { + return 0 + } + custom := i.(Marshaler) + data, _ := custom.Marshal() + n += sizeRawBytes(data) + return +} + +func (o *Buffer) enc_custom_slice_bytes(p *Properties, base structPointer) error { + inter := structPointer_InterfaceRef(base, p.field, p.ctype) + if inter == nil { + return ErrNil + } + slice := reflect.ValueOf(inter) + l := slice.Len() + for i := 0; i < l; i++ { + v := slice.Index(i) + custom := v.Interface().(Marshaler) + data, err := custom.Marshal() + if err != nil { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + } + return nil +} + +func size_custom_slice_bytes(p *Properties, base structPointer) (n int) { + inter := structPointer_InterfaceRef(base, p.field, p.ctype) + if inter == nil { + return 0 + } + slice := reflect.ValueOf(inter) + l := slice.Len() + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + v := slice.Index(i) + custom := v.Interface().(Marshaler) + data, _ := custom.Marshal() + n += sizeRawBytes(data) + } + return +} diff --git a/vendor/github.com/gogo/protobuf/proto/equal.go b/vendor/github.com/gogo/protobuf/proto/equal.go new file mode 100644 index 0000000..2ed1cf5 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/equal.go @@ -0,0 +1,300 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Protocol buffer comparison. + +package proto + +import ( + "bytes" + "log" + "reflect" + "strings" +) + +/* +Equal returns true iff protocol buffers a and b are equal. +The arguments must both be pointers to protocol buffer structs. + +Equality is defined in this way: + - Two messages are equal iff they are the same type, + corresponding fields are equal, unknown field sets + are equal, and extensions sets are equal. + - Two set scalar fields are equal iff their values are equal. + If the fields are of a floating-point type, remember that + NaN != x for all x, including NaN. If the message is defined + in a proto3 .proto file, fields are not "set"; specifically, + zero length proto3 "bytes" fields are equal (nil == {}). + - Two repeated fields are equal iff their lengths are the same, + and their corresponding elements are equal. Note a "bytes" field, + although represented by []byte, is not a repeated field and the + rule for the scalar fields described above applies. + - Two unset fields are equal. + - Two unknown field sets are equal if their current + encoded state is equal. + - Two extension sets are equal iff they have corresponding + elements that are pairwise equal. + - Two map fields are equal iff their lengths are the same, + and they contain the same set of elements. Zero-length map + fields are equal. + - Every other combination of things are not equal. + +The return value is undefined if a and b are not protocol buffers. +*/ +func Equal(a, b Message) bool { + if a == nil || b == nil { + return a == b + } + v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b) + if v1.Type() != v2.Type() { + return false + } + if v1.Kind() == reflect.Ptr { + if v1.IsNil() { + return v2.IsNil() + } + if v2.IsNil() { + return false + } + v1, v2 = v1.Elem(), v2.Elem() + } + if v1.Kind() != reflect.Struct { + return false + } + return equalStruct(v1, v2) +} + +// v1 and v2 are known to have the same type. 
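+// equalStruct compares exported fields in declaration order via equalAny,
+// skips the generated XXX_ fields in that loop, and then compares extension
+// sets and unrecognized bytes explicitly.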
+func equalStruct(v1, v2 reflect.Value) bool { + sprop := GetProperties(v1.Type()) + for i := 0; i < v1.NumField(); i++ { + f := v1.Type().Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + f1, f2 := v1.Field(i), v2.Field(i) + if f.Type.Kind() == reflect.Ptr { + if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 { + // both unset + continue + } else if n1 != n2 { + // set/unset mismatch + return false + } + b1, ok := f1.Interface().(raw) + if ok { + b2 := f2.Interface().(raw) + // RawMessage + if !bytes.Equal(b1.Bytes(), b2.Bytes()) { + return false + } + continue + } + f1, f2 = f1.Elem(), f2.Elem() + } + if !equalAny(f1, f2, sprop.Prop[i]) { + return false + } + } + + if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() { + em2 := v2.FieldByName("XXX_InternalExtensions") + if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) { + return false + } + } + + if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { + em2 := v2.FieldByName("XXX_extensions") + if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { + return false + } + } + + uf := v1.FieldByName("XXX_unrecognized") + if !uf.IsValid() { + return true + } + + u1 := uf.Bytes() + u2 := v2.FieldByName("XXX_unrecognized").Bytes() + if !bytes.Equal(u1, u2) { + return false + } + + return true +} + +// v1 and v2 are known to have the same type. +// prop may be nil. +func equalAny(v1, v2 reflect.Value, prop *Properties) bool { + if v1.Type() == protoMessageType { + m1, _ := v1.Interface().(Message) + m2, _ := v2.Interface().(Message) + return Equal(m1, m2) + } + switch v1.Kind() { + case reflect.Bool: + return v1.Bool() == v2.Bool() + case reflect.Float32, reflect.Float64: + return v1.Float() == v2.Float() + case reflect.Int32, reflect.Int64: + return v1.Int() == v2.Int() + case reflect.Interface: + // Probably a oneof field; compare the inner values. + n1, n2 := v1.IsNil(), v2.IsNil() + if n1 || n2 { + return n1 == n2 + } + e1, e2 := v1.Elem(), v2.Elem() + if e1.Type() != e2.Type() { + return false + } + return equalAny(e1, e2, nil) + case reflect.Map: + if v1.Len() != v2.Len() { + return false + } + for _, key := range v1.MapKeys() { + val2 := v2.MapIndex(key) + if !val2.IsValid() { + // This key was not found in the second map. + return false + } + if !equalAny(v1.MapIndex(key), val2, nil) { + return false + } + } + return true + case reflect.Ptr: + // Maps may have nil values in them, so check for nil. + if v1.IsNil() && v2.IsNil() { + return true + } + if v1.IsNil() != v2.IsNil() { + return false + } + return equalAny(v1.Elem(), v2.Elem(), prop) + case reflect.Slice: + if v1.Type().Elem().Kind() == reflect.Uint8 { + // short circuit: []byte + + // Edge case: if this is in a proto3 message, a zero length + // bytes field is considered the zero value. 
+ if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 { + return true + } + if v1.IsNil() != v2.IsNil() { + return false + } + return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte)) + } + + if v1.Len() != v2.Len() { + return false + } + for i := 0; i < v1.Len(); i++ { + if !equalAny(v1.Index(i), v2.Index(i), prop) { + return false + } + } + return true + case reflect.String: + return v1.Interface().(string) == v2.Interface().(string) + case reflect.Struct: + return equalStruct(v1, v2) + case reflect.Uint32, reflect.Uint64: + return v1.Uint() == v2.Uint() + } + + // unknown type, so not a protocol buffer + log.Printf("proto: don't know how to compare %v", v1) + return false +} + +// base is the struct type that the extensions are based on. +// x1 and x2 are InternalExtensions. +func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool { + em1, _ := x1.extensionsRead() + em2, _ := x2.extensionsRead() + return equalExtMap(base, em1, em2) +} + +func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { + if len(em1) != len(em2) { + return false + } + + for extNum, e1 := range em1 { + e2, ok := em2[extNum] + if !ok { + return false + } + + m1, m2 := e1.value, e2.value + + if m1 != nil && m2 != nil { + // Both are unencoded. + if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { + return false + } + continue + } + + // At least one is encoded. To do a semantically correct comparison + // we need to unmarshal them first. + var desc *ExtensionDesc + if m := extensionMaps[base]; m != nil { + desc = m[extNum] + } + if desc == nil { + log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) + continue + } + var err error + if m1 == nil { + m1, err = decodeExtension(e1.enc, desc) + } + if m2 == nil && err == nil { + m2, err = decodeExtension(e2.enc, desc) + } + if err != nil { + // The encoded form is invalid. + log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) + return false + } + if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { + return false + } + } + + return true +} diff --git a/vendor/github.com/gogo/protobuf/proto/extensions.go b/vendor/github.com/gogo/protobuf/proto/extensions.go new file mode 100644 index 0000000..0dfcb53 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/extensions.go @@ -0,0 +1,693 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Types and routines for supporting protocol buffer extensions. + */ + +import ( + "errors" + "fmt" + "reflect" + "strconv" + "sync" +) + +// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. +var ErrMissingExtension = errors.New("proto: missing extension") + +// ExtensionRange represents a range of message extensions for a protocol buffer. +// Used in code generated by the protocol compiler. +type ExtensionRange struct { + Start, End int32 // both inclusive +} + +// extendableProto is an interface implemented by any protocol buffer generated by the current +// proto compiler that may be extended. +type extendableProto interface { + Message + ExtensionRangeArray() []ExtensionRange + extensionsWrite() map[int32]Extension + extensionsRead() (map[int32]Extension, sync.Locker) +} + +// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous +// version of the proto compiler that may be extended. +type extendableProtoV1 interface { + Message + ExtensionRangeArray() []ExtensionRange + ExtensionMap() map[int32]Extension +} + +type extensionsBytes interface { + Message + ExtensionRangeArray() []ExtensionRange + GetExtensions() *[]byte +} + +// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto. +type extensionAdapter struct { + extendableProtoV1 +} + +func (e extensionAdapter) extensionsWrite() map[int32]Extension { + return e.ExtensionMap() +} + +func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { + return e.ExtensionMap(), notLocker{} +} + +// notLocker is a sync.Locker whose Lock and Unlock methods are nops. +type notLocker struct{} + +func (n notLocker) Lock() {} +func (n notLocker) Unlock() {} + +// extendable returns the extendableProto interface for the given generated proto message. +// If the proto message has the old extension format, it returns a wrapper that implements +// the extendableProto interface. +func extendable(p interface{}) (extendableProto, bool) { + if ep, ok := p.(extendableProto); ok { + return ep, ok + } + if ep, ok := p.(extendableProtoV1); ok { + return extensionAdapter{ep}, ok + } + return nil, false +} + +// XXX_InternalExtensions is an internal representation of proto extensions. +// +// Each generated message struct type embeds an anonymous XXX_InternalExtensions field, +// thus gaining the unexported 'extensions' method, which can be called only from the proto package. +// +// The methods of XXX_InternalExtensions are not concurrency safe in general, +// but calls to logically read-only methods such as has and get may be executed concurrently. +type XXX_InternalExtensions struct { + // The struct must be indirect so that if a user inadvertently copies a + // generated message and its embedded XXX_InternalExtensions, they + // avoid the mayhem of a copied mutex. + // + // The mutex serializes all logically read-only operations to p.extensionMap. 
+ // It is up to the client to ensure that write operations to p.extensionMap are + // mutually exclusive with other accesses. + p *struct { + mu sync.Mutex + extensionMap map[int32]Extension + } +} + +// extensionsWrite returns the extension map, creating it on first use. +func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension { + if e.p == nil { + e.p = new(struct { + mu sync.Mutex + extensionMap map[int32]Extension + }) + e.p.extensionMap = make(map[int32]Extension) + } + return e.p.extensionMap +} + +// extensionsRead returns the extensions map for read-only use. It may be nil. +// The caller must hold the returned mutex's lock when accessing Elements within the map. +func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) { + if e.p == nil { + return nil, nil + } + return e.p.extensionMap, &e.p.mu +} + +type extensionRange interface { + Message + ExtensionRangeArray() []ExtensionRange +} + +var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem() +var extendableProtoV1Type = reflect.TypeOf((*extendableProtoV1)(nil)).Elem() +var extendableBytesType = reflect.TypeOf((*extensionsBytes)(nil)).Elem() +var extensionRangeType = reflect.TypeOf((*extensionRange)(nil)).Elem() + +// ExtensionDesc represents an extension specification. +// Used in generated code from the protocol compiler. +type ExtensionDesc struct { + ExtendedType Message // nil pointer to the type that is being extended + ExtensionType interface{} // nil pointer to the extension type + Field int32 // field number + Name string // fully-qualified name of extension, for text formatting + Tag string // protobuf tag style + Filename string // name of the file in which the extension is defined +} + +func (ed *ExtensionDesc) repeated() bool { + t := reflect.TypeOf(ed.ExtensionType) + return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 +} + +// Extension represents an extension in a message. +type Extension struct { + // When an extension is stored in a message using SetExtension + // only desc and value are set. When the message is marshaled + // enc will be set to the encoded form of the message. + // + // When a message is unmarshaled and contains extensions, each + // extension will have only enc set. When such an extension is + // accessed using GetExtension (or GetExtensions) desc and value + // will be set. + desc *ExtensionDesc + value interface{} + enc []byte +} + +// SetRawExtension is for testing only. +func SetRawExtension(base Message, id int32, b []byte) { + if ebase, ok := base.(extensionsBytes); ok { + clearExtension(base, id) + ext := ebase.GetExtensions() + *ext = append(*ext, b...) + return + } + epb, ok := extendable(base) + if !ok { + return + } + extmap := epb.extensionsWrite() + extmap[id] = Extension{enc: b} +} + +// isExtensionField returns true iff the given field number is in an extension range. +func isExtensionField(pb extensionRange, field int32) bool { + for _, er := range pb.ExtensionRangeArray() { + if er.Start <= field && field <= er.End { + return true + } + } + return false +} + +// checkExtensionTypes checks that the given extension is valid for pb. +func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { + var pbi interface{} = pb + // Check the extended type. 
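+	// Unwrap the V1 compatibility adapter first, so the reflect comparison
+	// below sees the concrete generated type rather than the wrapper.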
+ if ea, ok := pbi.(extensionAdapter); ok { + pbi = ea.extendableProtoV1 + } + if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b { + return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String()) + } + // Check the range. + if !isExtensionField(pb, extension.Field) { + return errors.New("proto: bad extension number; not in declared ranges") + } + return nil +} + +// extPropKey is sufficient to uniquely identify an extension. +type extPropKey struct { + base reflect.Type + field int32 +} + +var extProp = struct { + sync.RWMutex + m map[extPropKey]*Properties +}{ + m: make(map[extPropKey]*Properties), +} + +func extensionProperties(ed *ExtensionDesc) *Properties { + key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} + + extProp.RLock() + if prop, ok := extProp.m[key]; ok { + extProp.RUnlock() + return prop + } + extProp.RUnlock() + + extProp.Lock() + defer extProp.Unlock() + // Check again. + if prop, ok := extProp.m[key]; ok { + return prop + } + + prop := new(Properties) + prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) + extProp.m[key] = prop + return prop +} + +// encode encodes any unmarshaled (unencoded) extensions in e. +func encodeExtensions(e *XXX_InternalExtensions) error { + m, mu := e.extensionsRead() + if m == nil { + return nil // fast path + } + mu.Lock() + defer mu.Unlock() + return encodeExtensionsMap(m) +} + +// encode encodes any unmarshaled (unencoded) extensions in e. +func encodeExtensionsMap(m map[int32]Extension) error { + for k, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + et := reflect.TypeOf(e.desc.ExtensionType) + props := extensionProperties(e.desc) + + p := NewBuffer(nil) + // If e.value has type T, the encoder expects a *struct{ X T }. + // Pass a *T with a zero field and hope it all works out. + x := reflect.New(et) + x.Elem().Set(reflect.ValueOf(e.value)) + if err := props.enc(p, props, toStructPointer(x)); err != nil { + return err + } + e.enc = p.buf + m[k] = e + } + return nil +} + +func extensionsSize(e *XXX_InternalExtensions) (n int) { + m, mu := e.extensionsRead() + if m == nil { + return 0 + } + mu.Lock() + defer mu.Unlock() + return extensionsMapSize(m) +} + +func extensionsMapSize(m map[int32]Extension) (n int) { + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + et := reflect.TypeOf(e.desc.ExtensionType) + props := extensionProperties(e.desc) + + // If e.value has type T, the encoder expects a *struct{ X T }. + // Pass a *T with a zero field and hope it all works out. + x := reflect.New(et) + x.Elem().Set(reflect.ValueOf(e.value)) + n += props.size(props, toStructPointer(x)) + } + return +} + +// HasExtension returns whether the given extension is present in pb. 
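+//
+// Illustrative usage (E_MyExt stands in for a generated *ExtensionDesc; it is
+// not defined in this package):
+//
+//	if HasExtension(msg, E_MyExt) {
+//		v, err := GetExtension(msg, E_MyExt)
+//		...
+//	}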
+func HasExtension(pb Message, extension *ExtensionDesc) bool { + if epb, doki := pb.(extensionsBytes); doki { + ext := epb.GetExtensions() + buf := *ext + o := 0 + for o < len(buf) { + tag, n := DecodeVarint(buf[o:]) + fieldNum := int32(tag >> 3) + if int32(fieldNum) == extension.Field { + return true + } + wireType := int(tag & 0x7) + o += n + l, err := size(buf[o:], wireType) + if err != nil { + return false + } + o += l + } + return false + } + // TODO: Check types, field numbers, etc.? + epb, ok := extendable(pb) + if !ok { + return false + } + extmap, mu := epb.extensionsRead() + if extmap == nil { + return false + } + mu.Lock() + _, ok = extmap[extension.Field] + mu.Unlock() + return ok +} + +func deleteExtension(pb extensionsBytes, theFieldNum int32, offset int) int { + ext := pb.GetExtensions() + for offset < len(*ext) { + tag, n1 := DecodeVarint((*ext)[offset:]) + fieldNum := int32(tag >> 3) + wireType := int(tag & 0x7) + n2, err := size((*ext)[offset+n1:], wireType) + if err != nil { + panic(err) + } + newOffset := offset + n1 + n2 + if fieldNum == theFieldNum { + *ext = append((*ext)[:offset], (*ext)[newOffset:]...) + return offset + } + offset = newOffset + } + return -1 +} + +// ClearExtension removes the given extension from pb. +func ClearExtension(pb Message, extension *ExtensionDesc) { + clearExtension(pb, extension.Field) +} + +func clearExtension(pb Message, fieldNum int32) { + if epb, doki := pb.(extensionsBytes); doki { + offset := 0 + for offset != -1 { + offset = deleteExtension(epb, fieldNum, offset) + } + return + } + epb, ok := extendable(pb) + if !ok { + return + } + // TODO: Check types, field numbers, etc.? + extmap := epb.extensionsWrite() + delete(extmap, fieldNum) +} + +// GetExtension parses and returns the given extension of pb. +// If the extension is not present and has no default value it returns ErrMissingExtension. +func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { + if epb, doki := pb.(extensionsBytes); doki { + ext := epb.GetExtensions() + o := 0 + for o < len(*ext) { + tag, n := DecodeVarint((*ext)[o:]) + fieldNum := int32(tag >> 3) + wireType := int(tag & 0x7) + l, err := size((*ext)[o+n:], wireType) + if err != nil { + return nil, err + } + if int32(fieldNum) == extension.Field { + v, err := decodeExtension((*ext)[o:o+n+l], extension) + if err != nil { + return nil, err + } + return v, nil + } + o += n + l + } + return defaultExtensionValue(extension) + } + epb, ok := extendable(pb) + if !ok { + return nil, errors.New("proto: not an extendable proto") + } + if err := checkExtensionTypes(epb, extension); err != nil { + return nil, err + } + + emap, mu := epb.extensionsRead() + if emap == nil { + return defaultExtensionValue(extension) + } + mu.Lock() + defer mu.Unlock() + e, ok := emap[extension.Field] + if !ok { + // defaultExtensionValue returns the default value or + // ErrMissingExtension if there is no default. + return defaultExtensionValue(extension) + } + + if e.value != nil { + // Already decoded. Check the descriptor, though. + if e.desc != extension { + // This shouldn't happen. If it does, it means that + // GetExtension was called twice with two different + // descriptors with the same field number. + return nil, errors.New("proto: descriptor conflict") + } + return e.value, nil + } + + v, err := decodeExtension(e.enc, extension) + if err != nil { + return nil, err + } + + // Remember the decoded version and drop the encoded version. + // That way it is safe to mutate what we return. 
+ e.value = v + e.desc = extension + e.enc = nil + emap[extension.Field] = e + return e.value, nil +} + +// defaultExtensionValue returns the default value for extension. +// If no default for an extension is defined ErrMissingExtension is returned. +func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { + t := reflect.TypeOf(extension.ExtensionType) + props := extensionProperties(extension) + + sf, _, err := fieldDefault(t, props) + if err != nil { + return nil, err + } + + if sf == nil || sf.value == nil { + // There is no default value. + return nil, ErrMissingExtension + } + + if t.Kind() != reflect.Ptr { + // We do not need to return a Ptr, we can directly return sf.value. + return sf.value, nil + } + + // We need to return an interface{} that is a pointer to sf.value. + value := reflect.New(t).Elem() + value.Set(reflect.New(value.Type().Elem())) + if sf.kind == reflect.Int32 { + // We may have an int32 or an enum, but the underlying data is int32. + // Since we can't set an int32 into a non int32 reflect.value directly + // set it as a int32. + value.Elem().SetInt(int64(sf.value.(int32))) + } else { + value.Elem().Set(reflect.ValueOf(sf.value)) + } + return value.Interface(), nil +} + +// decodeExtension decodes an extension encoded in b. +func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { + o := NewBuffer(b) + + t := reflect.TypeOf(extension.ExtensionType) + + props := extensionProperties(extension) + + // t is a pointer to a struct, pointer to basic type or a slice. + // Allocate a "field" to store the pointer/slice itself; the + // pointer/slice will be stored here. We pass + // the address of this field to props.dec. + // This passes a zero field and a *t and lets props.dec + // interpret it as a *struct{ x t }. + value := reflect.New(t).Elem() + + for { + // Discard wire type and field number varint. It isn't needed. + if _, err := o.DecodeVarint(); err != nil { + return nil, err + } + + if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil { + return nil, err + } + + if o.index >= len(o.buf) { + break + } + } + return value.Interface(), nil +} + +// GetExtensions returns a slice of the extensions present in pb that are also listed in es. +// The returned slice has the same length as es; missing extensions will appear as nil elements. +func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { + extensions = make([]interface{}, len(es)) + for i, e := range es { + extensions[i], err = GetExtension(pb, e) + if err == ErrMissingExtension { + err = nil + } + if err != nil { + return + } + } + return +} + +// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order. +// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing +// just the Field field, which defines the extension's field number. 
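+// For example (an illustrative sketch), listing which extension field
+// numbers are set on msg:
+//
+//	descs, err := proto.ExtensionDescs(msg)
+//	if err != nil {
+//		...
+//	}
+//	for _, d := range descs {
+//		fmt.Println("extension field", d.Field)
+//	}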
+func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { + epb, ok := extendable(pb) + if !ok { + return nil, fmt.Errorf("proto: %T is not an extendable proto.Message", pb) + } + registeredExtensions := RegisteredExtensions(pb) + + emap, mu := epb.extensionsRead() + if emap == nil { + return nil, nil + } + mu.Lock() + defer mu.Unlock() + extensions := make([]*ExtensionDesc, 0, len(emap)) + for extid, e := range emap { + desc := e.desc + if desc == nil { + desc = registeredExtensions[extid] + if desc == nil { + desc = &ExtensionDesc{Field: extid} + } + } + + extensions = append(extensions, desc) + } + return extensions, nil +} + +// SetExtension sets the specified extension of pb to the specified value. +func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { + if epb, doki := pb.(extensionsBytes); doki { + ClearExtension(pb, extension) + ext := epb.GetExtensions() + et := reflect.TypeOf(extension.ExtensionType) + props := extensionProperties(extension) + p := NewBuffer(nil) + x := reflect.New(et) + x.Elem().Set(reflect.ValueOf(value)) + if err := props.enc(p, props, toStructPointer(x)); err != nil { + return err + } + *ext = append(*ext, p.buf...) + return nil + } + epb, ok := extendable(pb) + if !ok { + return errors.New("proto: not an extendable proto") + } + if err := checkExtensionTypes(epb, extension); err != nil { + return err + } + typ := reflect.TypeOf(extension.ExtensionType) + if typ != reflect.TypeOf(value) { + return errors.New("proto: bad extension value type") + } + // nil extension values need to be caught early, because the + // encoder can't distinguish an ErrNil due to a nil extension + // from an ErrNil due to a missing field. Extensions are + // always optional, so the encoder would just swallow the error + // and drop all the extensions from the encoded message. + if reflect.ValueOf(value).IsNil() { + return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) + } + + extmap := epb.extensionsWrite() + extmap[extension.Field] = Extension{desc: extension, value: value} + return nil +} + +// ClearAllExtensions clears all extensions from pb. +func ClearAllExtensions(pb Message) { + if epb, doki := pb.(extensionsBytes); doki { + ext := epb.GetExtensions() + *ext = []byte{} + return + } + epb, ok := extendable(pb) + if !ok { + return + } + m := epb.extensionsWrite() + for k := range m { + delete(m, k) + } +} + +// A global registry of extensions. +// The generated code will register the generated descriptors by calling RegisterExtension. + +var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) + +// RegisterExtension is called from the generated code. +func RegisterExtension(desc *ExtensionDesc) { + st := reflect.TypeOf(desc.ExtendedType).Elem() + m := extensionMaps[st] + if m == nil { + m = make(map[int32]*ExtensionDesc) + extensionMaps[st] = m + } + if _, ok := m[desc.Field]; ok { + panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) + } + m[desc.Field] = desc +} + +// RegisteredExtensions returns a map of the registered extensions of a +// protocol buffer struct, indexed by the extension number. +// The argument pb should be a nil pointer to the struct type. 
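+// For example (illustrative; pb.MyMessage stands in for a generated type):
+//
+//	exts := proto.RegisteredExtensions((*pb.MyMessage)(nil))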
+func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { + return extensionMaps[reflect.TypeOf(pb).Elem()] +} diff --git a/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go new file mode 100644 index 0000000..ea6478f --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go @@ -0,0 +1,294 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+
+package proto
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"reflect"
+	"sort"
+	"strings"
+	"sync"
+)
+
+func GetBoolExtension(pb Message, extension *ExtensionDesc, ifnotset bool) bool {
+	if reflect.ValueOf(pb).IsNil() {
+		return ifnotset
+	}
+	value, err := GetExtension(pb, extension)
+	if err != nil {
+		return ifnotset
+	}
+	if value == nil {
+		return ifnotset
+	}
+	if value.(*bool) == nil {
+		return ifnotset
+	}
+	return *(value.(*bool))
+}
+
+func (this *Extension) Equal(that *Extension) bool {
+	return bytes.Equal(this.enc, that.enc)
+}
+
+func (this *Extension) Compare(that *Extension) int {
+	return bytes.Compare(this.enc, that.enc)
+}
+
+func SizeOfInternalExtension(m extendableProto) (n int) {
+	return SizeOfExtensionMap(m.extensionsWrite())
+}
+
+func SizeOfExtensionMap(m map[int32]Extension) (n int) {
+	return extensionsMapSize(m)
+}
+
+type sortableMapElem struct {
+	field int32
+	ext   Extension
+}
+
+func newSortableExtensionsFromMap(m map[int32]Extension) sortableExtensions {
+	s := make(sortableExtensions, 0, len(m))
+	for k, v := range m {
+		s = append(s, &sortableMapElem{field: k, ext: v})
+	}
+	return s
+}
+
+type sortableExtensions []*sortableMapElem
+
+func (this sortableExtensions) Len() int { return len(this) }
+
+func (this sortableExtensions) Swap(i, j int) { this[i], this[j] = this[j], this[i] }
+
+func (this sortableExtensions) Less(i, j int) bool { return this[i].field < this[j].field }
+
+func (this sortableExtensions) String() string {
+	sort.Sort(this)
+	ss := make([]string, len(this))
+	for i := range this {
+		ss[i] = fmt.Sprintf("%d: %v", this[i].field, this[i].ext)
+	}
+	return "map[" + strings.Join(ss, ",") + "]"
+}
+
+func StringFromInternalExtension(m extendableProto) string {
+	return StringFromExtensionsMap(m.extensionsWrite())
+}
+
+func StringFromExtensionsMap(m map[int32]Extension) string {
+	return newSortableExtensionsFromMap(m).String()
+}
+
+func StringFromExtensionsBytes(ext []byte) string {
+	m, err := BytesToExtensionsMap(ext)
+	if err != nil {
+		panic(err)
+	}
+	return StringFromExtensionsMap(m)
+}
+
+func EncodeInternalExtension(m extendableProto, data []byte) (n int, err error) {
+	return EncodeExtensionMap(m.extensionsWrite(), data)
+}
+
+func EncodeExtensionMap(m map[int32]Extension, data []byte) (n int, err error) {
+	if err := encodeExtensionsMap(m); err != nil {
+		return 0, err
+	}
+	// Emit extensions in sorted field order for a deterministic encoding.
+	keys := make([]int, 0, len(m))
+	for k := range m {
+		keys = append(keys, int(k))
+	}
+	sort.Ints(keys)
+	for _, k := range keys {
+		n += copy(data[n:], m[int32(k)].enc)
+	}
+	return n, nil
+}
+
+func GetRawExtension(m map[int32]Extension, id int32) ([]byte, error) {
+	if m[id].value == nil || m[id].desc == nil {
+		return m[id].enc, nil
+	}
+	if err := encodeExtensionsMap(m); err != nil {
+		return nil, err
+	}
+	return m[id].enc, nil
+}
+
+func size(buf []byte, wire int) (int, error) {
+	switch wire {
+	case WireVarint:
+		_, n := DecodeVarint(buf)
+		return n, nil
+	case WireFixed64:
+		return 8, nil
+	case WireBytes:
+		v, n := DecodeVarint(buf)
+		return int(v) + n, nil
+	case WireFixed32:
+		return 4, nil
+	case WireStartGroup:
+		offset := 0
+		for {
+			u, n := DecodeVarint(buf[offset:])
+			fwire := int(u & 0x7)
+			offset += n
+			if fwire == WireEndGroup {
+				return offset, nil
+			}
+			// Size the nested field using its own wire type (fwire),
+			// not the enclosing group's.
+			s, err := size(buf[offset:], fwire)
+			if err != nil {
+				return 0, err
+			}
+			offset += s
+		}
+	}
+	return 0, fmt.Errorf("proto: can't get size for unknown wire type %d", wire)
+}
+
+// BytesToExtensionsMap scans a raw extension byte slice field by field and
+// returns the encoded extensions keyed by field number.
+func BytesToExtensionsMap(buf []byte) (map[int32]Extension, error) {
+	m := make(map[int32]Extension)
+	
i := 0 + for i < len(buf) { + tag, n := DecodeVarint(buf[i:]) + if n <= 0 { + return nil, fmt.Errorf("unable to decode varint") + } + fieldNum := int32(tag >> 3) + wireType := int(tag & 0x7) + l, err := size(buf[i+n:], wireType) + if err != nil { + return nil, err + } + end := i + int(l) + n + m[int32(fieldNum)] = Extension{enc: buf[i:end]} + i = end + } + return m, nil +} + +func NewExtension(e []byte) Extension { + ee := Extension{enc: make([]byte, len(e))} + copy(ee.enc, e) + return ee +} + +func AppendExtension(e Message, tag int32, buf []byte) { + if ee, eok := e.(extensionsBytes); eok { + ext := ee.GetExtensions() + *ext = append(*ext, buf...) + return + } + if ee, eok := e.(extendableProto); eok { + m := ee.extensionsWrite() + ext := m[int32(tag)] // may be missing + ext.enc = append(ext.enc, buf...) + m[int32(tag)] = ext + } +} + +func encodeExtension(e *Extension) error { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + return nil + } + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + et := reflect.TypeOf(e.desc.ExtensionType) + props := extensionProperties(e.desc) + + p := NewBuffer(nil) + // If e.value has type T, the encoder expects a *struct{ X T }. + // Pass a *T with a zero field and hope it all works out. + x := reflect.New(et) + x.Elem().Set(reflect.ValueOf(e.value)) + if err := props.enc(p, props, toStructPointer(x)); err != nil { + return err + } + e.enc = p.buf + return nil +} + +func (this Extension) GoString() string { + if this.enc == nil { + if err := encodeExtension(&this); err != nil { + panic(err) + } + } + return fmt.Sprintf("proto.NewExtension(%#v)", this.enc) +} + +func SetUnsafeExtension(pb Message, fieldNum int32, value interface{}) error { + typ := reflect.TypeOf(pb).Elem() + ext, ok := extensionMaps[typ] + if !ok { + return fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String()) + } + desc, ok := ext[fieldNum] + if !ok { + return errors.New("proto: bad extension number; not in declared ranges") + } + return SetExtension(pb, desc, value) +} + +func GetUnsafeExtension(pb Message, fieldNum int32) (interface{}, error) { + typ := reflect.TypeOf(pb).Elem() + ext, ok := extensionMaps[typ] + if !ok { + return nil, fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String()) + } + desc, ok := ext[fieldNum] + if !ok { + return nil, fmt.Errorf("unregistered field number %d", fieldNum) + } + return GetExtension(pb, desc) +} + +func NewUnsafeXXX_InternalExtensions(m map[int32]Extension) XXX_InternalExtensions { + x := &XXX_InternalExtensions{ + p: new(struct { + mu sync.Mutex + extensionMap map[int32]Extension + }), + } + x.p.extensionMap = m + return *x +} + +func GetUnsafeExtensionsMap(extendable Message) map[int32]Extension { + pb := extendable.(extendableProto) + return pb.extensionsWrite() +} diff --git a/vendor/github.com/gogo/protobuf/proto/lib.go b/vendor/github.com/gogo/protobuf/proto/lib.go new file mode 100644 index 0000000..c98d73d --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/lib.go @@ -0,0 +1,897 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+Package proto converts data structures to and from the wire format of
+protocol buffers. It works in concert with the Go source code generated
+for .proto files by the protocol compiler.
+
+A summary of the properties of the protocol buffer interface
+for a protocol buffer variable v:
+
+  - Names are turned from camel_case to CamelCase for export.
+  - There are no methods on v to set fields; just treat
+	them as structure fields.
+  - There are getters that return a field's value if set,
+	and return the field's default value if unset.
+	The getters work even if the receiver is a nil message.
+  - The zero value for a struct is its correct initialization state.
+	All desired fields must be set before marshaling.
+  - A Reset() method will restore a protobuf struct to its zero state.
+  - Non-repeated fields are pointers to the values; nil means unset.
+	That is, optional or required field int32 f becomes F *int32.
+  - Repeated fields are slices.
+  - Helper functions are available to aid the setting of fields.
+	msg.Foo = proto.String("hello") // set field
+  - Constants are defined to hold the default values of all fields that
+	have them. They have the form Default_StructName_FieldName.
+	Because the getter methods handle defaulted values,
+	direct use of these constants should be rare.
+  - Enums are given type names and maps from names to values.
+	Enum values are prefixed by the enclosing message's name, or by the
+	enum's type name if it is a top-level enum. Enum types have a String
+	method, and an Enum method to assist in message construction.
+  - Nested messages, groups and enums have type names prefixed with the name of
+	the surrounding message type.
+  - Extensions are given descriptor names that start with E_,
+	followed by an underscore-delimited list of the nested messages
+	that contain it (if any) followed by the CamelCased name of the
+	extension field itself.
HasExtension, ClearExtension, GetExtension + and SetExtension are functions for manipulating extensions. + - Oneof field sets are given a single field in their message, + with distinguished wrapper types for each possible field value. + - Marshal and Unmarshal are functions to encode and decode the wire format. + +When the .proto file specifies `syntax="proto3"`, there are some differences: + + - Non-repeated fields of non-message type are values instead of pointers. + - Enum types do not get an Enum method. + +The simplest way to describe this is to see an example. +Given file test.proto, containing + + package example; + + enum FOO { X = 17; } + + message Test { + required string label = 1; + optional int32 type = 2 [default=77]; + repeated int64 reps = 3; + optional group OptionalGroup = 4 { + required string RequiredField = 5; + } + oneof union { + int32 number = 6; + string name = 7; + } + } + +The resulting file, test.pb.go, is: + + package example + + import proto "github.com/gogo/protobuf/proto" + import math "math" + + type FOO int32 + const ( + FOO_X FOO = 17 + ) + var FOO_name = map[int32]string{ + 17: "X", + } + var FOO_value = map[string]int32{ + "X": 17, + } + + func (x FOO) Enum() *FOO { + p := new(FOO) + *p = x + return p + } + func (x FOO) String() string { + return proto.EnumName(FOO_name, int32(x)) + } + func (x *FOO) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FOO_value, data) + if err != nil { + return err + } + *x = FOO(value) + return nil + } + + type Test struct { + Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` + Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` + Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` + Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` + // Types that are valid to be assigned to Union: + // *Test_Number + // *Test_Name + Union isTest_Union `protobuf_oneof:"union"` + XXX_unrecognized []byte `json:"-"` + } + func (m *Test) Reset() { *m = Test{} } + func (m *Test) String() string { return proto.CompactTextString(m) } + func (*Test) ProtoMessage() {} + + type isTest_Union interface { + isTest_Union() + } + + type Test_Number struct { + Number int32 `protobuf:"varint,6,opt,name=number"` + } + type Test_Name struct { + Name string `protobuf:"bytes,7,opt,name=name"` + } + + func (*Test_Number) isTest_Union() {} + func (*Test_Name) isTest_Union() {} + + func (m *Test) GetUnion() isTest_Union { + if m != nil { + return m.Union + } + return nil + } + const Default_Test_Type int32 = 77 + + func (m *Test) GetLabel() string { + if m != nil && m.Label != nil { + return *m.Label + } + return "" + } + + func (m *Test) GetType() int32 { + if m != nil && m.Type != nil { + return *m.Type + } + return Default_Test_Type + } + + func (m *Test) GetOptionalgroup() *Test_OptionalGroup { + if m != nil { + return m.Optionalgroup + } + return nil + } + + type Test_OptionalGroup struct { + RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` + } + func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } + func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } + + func (m *Test_OptionalGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" + } + + func (m *Test) GetNumber() int32 { + if x, ok := m.GetUnion().(*Test_Number); ok { + return x.Number + } + return 0 + } + + func (m 
*Test) GetName() string { + if x, ok := m.GetUnion().(*Test_Name); ok { + return x.Name + } + return "" + } + + func init() { + proto.RegisterEnum("example.FOO", FOO_name, FOO_value) + } + +To create and play with a Test object: + + package main + + import ( + "log" + + "github.com/gogo/protobuf/proto" + pb "./example.pb" + ) + + func main() { + test := &pb.Test{ + Label: proto.String("hello"), + Type: proto.Int32(17), + Reps: []int64{1, 2, 3}, + Optionalgroup: &pb.Test_OptionalGroup{ + RequiredField: proto.String("good bye"), + }, + Union: &pb.Test_Name{"fred"}, + } + data, err := proto.Marshal(test) + if err != nil { + log.Fatal("marshaling error: ", err) + } + newTest := &pb.Test{} + err = proto.Unmarshal(data, newTest) + if err != nil { + log.Fatal("unmarshaling error: ", err) + } + // Now test and newTest contain the same data. + if test.GetLabel() != newTest.GetLabel() { + log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) + } + // Use a type switch to determine which oneof was set. + switch u := test.Union.(type) { + case *pb.Test_Number: // u.Number contains the number. + case *pb.Test_Name: // u.Name contains the string. + } + // etc. + } +*/ +package proto + +import ( + "encoding/json" + "fmt" + "log" + "reflect" + "sort" + "strconv" + "sync" +) + +// Message is implemented by generated protocol buffer messages. +type Message interface { + Reset() + String() string + ProtoMessage() +} + +// Stats records allocation details about the protocol buffer encoders +// and decoders. Useful for tuning the library itself. +type Stats struct { + Emalloc uint64 // mallocs in encode + Dmalloc uint64 // mallocs in decode + Encode uint64 // number of encodes + Decode uint64 // number of decodes + Chit uint64 // number of cache hits + Cmiss uint64 // number of cache misses + Size uint64 // number of sizes +} + +// Set to true to enable stats collection. +const collectStats = false + +var stats Stats + +// GetStats returns a copy of the global Stats structure. +func GetStats() Stats { return stats } + +// A Buffer is a buffer manager for marshaling and unmarshaling +// protocol buffers. It may be reused between invocations to +// reduce memory usage. It is not necessary to use a Buffer; +// the global functions Marshal and Unmarshal create a +// temporary Buffer and are fine for most applications. +type Buffer struct { + buf []byte // encode/decode byte stream + index int // read point + + // pools of basic types to amortize allocation. + bools []bool + uint32s []uint32 + uint64s []uint64 + + // extra pools, only used with pointer_reflect.go + int32s []int32 + int64s []int64 + float32s []float32 + float64s []float64 +} + +// NewBuffer allocates a new Buffer and initializes its internal data to +// the contents of the argument slice. +func NewBuffer(e []byte) *Buffer { + return &Buffer{buf: e} +} + +// Reset resets the Buffer, ready for marshaling a new protocol buffer. +func (p *Buffer) Reset() { + p.buf = p.buf[0:0] // for reading/writing + p.index = 0 // for reading +} + +// SetBuf replaces the internal buffer with the slice, +// ready for unmarshaling the contents of the slice. +func (p *Buffer) SetBuf(s []byte) { + p.buf = s + p.index = 0 +} + +// Bytes returns the contents of the Buffer. +func (p *Buffer) Bytes() []byte { return p.buf } + +/* + * Helper routines for simplifying the creation of optional fields of basic type. + */ + +// Bool is a helper routine that allocates a new bool value +// to store v and returns a pointer to it. 
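+// For example (illustrative; the field name is hypothetical):
+//
+//	msg.Verified = proto.Bool(true)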
+func Bool(v bool) *bool { + return &v +} + +// Int32 is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it. +func Int32(v int32) *int32 { + return &v +} + +// Int is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it, but unlike Int32 +// its argument value is an int. +func Int(v int) *int32 { + p := new(int32) + *p = int32(v) + return p +} + +// Int64 is a helper routine that allocates a new int64 value +// to store v and returns a pointer to it. +func Int64(v int64) *int64 { + return &v +} + +// Float32 is a helper routine that allocates a new float32 value +// to store v and returns a pointer to it. +func Float32(v float32) *float32 { + return &v +} + +// Float64 is a helper routine that allocates a new float64 value +// to store v and returns a pointer to it. +func Float64(v float64) *float64 { + return &v +} + +// Uint32 is a helper routine that allocates a new uint32 value +// to store v and returns a pointer to it. +func Uint32(v uint32) *uint32 { + return &v +} + +// Uint64 is a helper routine that allocates a new uint64 value +// to store v and returns a pointer to it. +func Uint64(v uint64) *uint64 { + return &v +} + +// String is a helper routine that allocates a new string value +// to store v and returns a pointer to it. +func String(v string) *string { + return &v +} + +// EnumName is a helper function to simplify printing protocol buffer enums +// by name. Given an enum map and a value, it returns a useful string. +func EnumName(m map[int32]string, v int32) string { + s, ok := m[v] + if ok { + return s + } + return strconv.Itoa(int(v)) +} + +// UnmarshalJSONEnum is a helper function to simplify recovering enum int values +// from their JSON-encoded representation. Given a map from the enum's symbolic +// names to its int values, and a byte buffer containing the JSON-encoded +// value, it returns an int32 that can be cast to the enum type by the caller. +// +// The function can deal with both JSON representations, numeric and symbolic. +func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { + if data[0] == '"' { + // New style: enums are strings. + var repr string + if err := json.Unmarshal(data, &repr); err != nil { + return -1, err + } + val, ok := m[repr] + if !ok { + return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) + } + return val, nil + } + // Old style: enums are ints. + var val int32 + if err := json.Unmarshal(data, &val); err != nil { + return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) + } + return val, nil +} + +// DebugPrint dumps the encoded data in b in a debugging format with a header +// including the string s. Used in testing but made available for general debugging. 
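+// For example (an illustrative sketch):
+//
+//	data, _ := proto.Marshal(msg)
+//	proto.NewBuffer(nil).DebugPrint("marshaled msg", data)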
+func (p *Buffer) DebugPrint(s string, b []byte) { + var u uint64 + + obuf := p.buf + sindex := p.index + p.buf = b + p.index = 0 + depth := 0 + + fmt.Printf("\n--- %s ---\n", s) + +out: + for { + for i := 0; i < depth; i++ { + fmt.Print(" ") + } + + index := p.index + if index == len(p.buf) { + break + } + + op, err := p.DecodeVarint() + if err != nil { + fmt.Printf("%3d: fetching op err %v\n", index, err) + break out + } + tag := op >> 3 + wire := op & 7 + + switch wire { + default: + fmt.Printf("%3d: t=%3d unknown wire=%d\n", + index, tag, wire) + break out + + case WireBytes: + var r []byte + + r, err = p.DecodeRawBytes(false) + if err != nil { + break out + } + fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) + if len(r) <= 6 { + for i := 0; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } else { + for i := 0; i < 3; i++ { + fmt.Printf(" %.2x", r[i]) + } + fmt.Printf(" ..") + for i := len(r) - 3; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } + fmt.Printf("\n") + + case WireFixed32: + u, err = p.DecodeFixed32() + if err != nil { + fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) + + case WireFixed64: + u, err = p.DecodeFixed64() + if err != nil { + fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) + + case WireVarint: + u, err = p.DecodeVarint() + if err != nil { + fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) + + case WireStartGroup: + fmt.Printf("%3d: t=%3d start\n", index, tag) + depth++ + + case WireEndGroup: + depth-- + fmt.Printf("%3d: t=%3d end\n", index, tag) + } + } + + if depth != 0 { + fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth) + } + fmt.Printf("\n") + + p.buf = obuf + p.index = sindex +} + +// SetDefaults sets unset protocol buffer fields to their default values. +// It only modifies fields that are both unset and have defined defaults. +// It recursively sets default values in any non-nil sub-messages. +func SetDefaults(pb Message) { + setDefaults(reflect.ValueOf(pb), true, false) +} + +// v is a pointer to a struct. +func setDefaults(v reflect.Value, recur, zeros bool) { + v = v.Elem() + + defaultMu.RLock() + dm, ok := defaults[v.Type()] + defaultMu.RUnlock() + if !ok { + dm = buildDefaultMessage(v.Type()) + defaultMu.Lock() + defaults[v.Type()] = dm + defaultMu.Unlock() + } + + for _, sf := range dm.scalars { + f := v.Field(sf.index) + if !f.IsNil() { + // field already set + continue + } + dv := sf.value + if dv == nil && !zeros { + // no explicit default, and don't want to set zeros + continue + } + fptr := f.Addr().Interface() // **T + // TODO: Consider batching the allocations we do here. 
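+		// Allocate a fresh pointer for each scalar kind and copy in the
+		// declared default; when dv is nil (zeros mode) the allocated
+		// zero value is kept as-is.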
+ switch sf.kind { + case reflect.Bool: + b := new(bool) + if dv != nil { + *b = dv.(bool) + } + *(fptr.(**bool)) = b + case reflect.Float32: + f := new(float32) + if dv != nil { + *f = dv.(float32) + } + *(fptr.(**float32)) = f + case reflect.Float64: + f := new(float64) + if dv != nil { + *f = dv.(float64) + } + *(fptr.(**float64)) = f + case reflect.Int32: + // might be an enum + if ft := f.Type(); ft != int32PtrType { + // enum + f.Set(reflect.New(ft.Elem())) + if dv != nil { + f.Elem().SetInt(int64(dv.(int32))) + } + } else { + // int32 field + i := new(int32) + if dv != nil { + *i = dv.(int32) + } + *(fptr.(**int32)) = i + } + case reflect.Int64: + i := new(int64) + if dv != nil { + *i = dv.(int64) + } + *(fptr.(**int64)) = i + case reflect.String: + s := new(string) + if dv != nil { + *s = dv.(string) + } + *(fptr.(**string)) = s + case reflect.Uint8: + // exceptional case: []byte + var b []byte + if dv != nil { + db := dv.([]byte) + b = make([]byte, len(db)) + copy(b, db) + } else { + b = []byte{} + } + *(fptr.(*[]byte)) = b + case reflect.Uint32: + u := new(uint32) + if dv != nil { + *u = dv.(uint32) + } + *(fptr.(**uint32)) = u + case reflect.Uint64: + u := new(uint64) + if dv != nil { + *u = dv.(uint64) + } + *(fptr.(**uint64)) = u + default: + log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) + } + } + + for _, ni := range dm.nested { + f := v.Field(ni) + // f is *T or []*T or map[T]*T + switch f.Kind() { + case reflect.Ptr: + if f.IsNil() { + continue + } + setDefaults(f, recur, zeros) + + case reflect.Slice: + for i := 0; i < f.Len(); i++ { + e := f.Index(i) + if e.IsNil() { + continue + } + setDefaults(e, recur, zeros) + } + + case reflect.Map: + for _, k := range f.MapKeys() { + e := f.MapIndex(k) + if e.IsNil() { + continue + } + setDefaults(e, recur, zeros) + } + } + } +} + +var ( + // defaults maps a protocol buffer struct type to a slice of the fields, + // with its scalar fields set to their proto-declared non-zero default values. + defaultMu sync.RWMutex + defaults = make(map[reflect.Type]defaultMessage) + + int32PtrType = reflect.TypeOf((*int32)(nil)) +) + +// defaultMessage represents information about the default values of a message. +type defaultMessage struct { + scalars []scalarField + nested []int // struct field index of nested messages +} + +type scalarField struct { + index int // struct field index + kind reflect.Kind // element type (the T in *T or []T) + value interface{} // the proto-declared default value, or nil +} + +// t is a struct type. +func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { + sprop := GetProperties(t) + for _, prop := range sprop.Prop { + fi, ok := sprop.decoderTags.get(prop.Tag) + if !ok { + // XXX_unrecognized + continue + } + ft := t.Field(fi).Type + + sf, nested, err := fieldDefault(ft, prop) + switch { + case err != nil: + log.Print(err) + case nested: + dm.nested = append(dm.nested, fi) + case sf != nil: + sf.index = fi + dm.scalars = append(dm.scalars, *sf) + } + } + + return dm +} + +// fieldDefault returns the scalarField for field type ft. +// sf will be nil if the field can not have a default. +// nestedMessage will be true if this is a nested message. +// Note that sf.index is not set on return. 
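+// For example (illustrative): a *int32 field declared with [default=77]
+// yields &scalarField{kind: reflect.Int32, value: int32(77)}, while a
+// pointer-to-struct field reports nestedMessage == true.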
+func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { + var canHaveDefault bool + switch ft.Kind() { + case reflect.Ptr: + if ft.Elem().Kind() == reflect.Struct { + nestedMessage = true + } else { + canHaveDefault = true // proto2 scalar field + } + + case reflect.Slice: + switch ft.Elem().Kind() { + case reflect.Ptr: + nestedMessage = true // repeated message + case reflect.Uint8: + canHaveDefault = true // bytes field + } + + case reflect.Map: + if ft.Elem().Kind() == reflect.Ptr { + nestedMessage = true // map with message values + } + } + + if !canHaveDefault { + if nestedMessage { + return nil, true, nil + } + return nil, false, nil + } + + // We now know that ft is a pointer or slice. + sf = &scalarField{kind: ft.Elem().Kind()} + + // scalar fields without defaults + if !prop.HasDefault { + return sf, false, nil + } + + // a scalar field: either *T or []byte + switch ft.Elem().Kind() { + case reflect.Bool: + x, err := strconv.ParseBool(prop.Default) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Float32: + x, err := strconv.ParseFloat(prop.Default, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) + } + sf.value = float32(x) + case reflect.Float64: + x, err := strconv.ParseFloat(prop.Default, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Int32: + x, err := strconv.ParseInt(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) + } + sf.value = int32(x) + case reflect.Int64: + x, err := strconv.ParseInt(prop.Default, 10, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err) + } + sf.value = x + case reflect.String: + sf.value = prop.Default + case reflect.Uint8: + // []byte (not *uint8) + sf.value = []byte(prop.Default) + case reflect.Uint32: + x, err := strconv.ParseUint(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) + } + sf.value = uint32(x) + case reflect.Uint64: + x, err := strconv.ParseUint(prop.Default, 10, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) + } + sf.value = x + default: + return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) + } + + return sf, false, nil +} + +// Map fields may have key types of non-float scalars, strings and enums. +// The easiest way to sort them in some deterministic order is to use fmt. +// If this turns out to be inefficient we can always consider other options, +// such as doing a Schwartzian transform. + +func mapKeys(vs []reflect.Value) sort.Interface { + s := mapKeySorter{ + vs: vs, + // default Less function: textual comparison + less: func(a, b reflect.Value) bool { + return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface()) + }, + } + + // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps; + // numeric keys are sorted numerically. 
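+	// Bool and string keys keep the textual fmt.Sprint comparison above,
+	// which is already deterministic for them.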
+	if len(vs) == 0 {
+		return s
+	}
+	switch vs[0].Kind() {
+	case reflect.Int32, reflect.Int64:
+		s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
+	case reflect.Uint32, reflect.Uint64:
+		s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
+	}
+
+	return s
+}
+
+type mapKeySorter struct {
+	vs   []reflect.Value
+	less func(a, b reflect.Value) bool
+}
+
+func (s mapKeySorter) Len() int      { return len(s.vs) }
+func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] }
+func (s mapKeySorter) Less(i, j int) bool {
+	return s.less(s.vs[i], s.vs[j])
+}
+
+// isProto3Zero reports whether v is a zero proto3 value.
+func isProto3Zero(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint32, reflect.Uint64:
+		return v.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.String:
+		return v.String() == ""
+	}
+	return false
+}
+
+// GoGoProtoPackageIsVersion2 is referenced from generated protocol buffer files
+// to assert that that code is compatible with this version of the proto package.
+const GoGoProtoPackageIsVersion2 = true
+
+// GoGoProtoPackageIsVersion1 is referenced from generated protocol buffer files
+// to assert that that code is compatible with this version of the proto package.
+const GoGoProtoPackageIsVersion1 = true
diff --git a/vendor/github.com/gogo/protobuf/proto/lib_gogo.go b/vendor/github.com/gogo/protobuf/proto/lib_gogo.go
new file mode 100644
index 0000000..4b4f7c9
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/lib_gogo.go
@@ -0,0 +1,42 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ +package proto + +import ( + "encoding/json" + "strconv" +) + +func MarshalJSONEnum(m map[int32]string, value int32) ([]byte, error) { + s, ok := m[value] + if !ok { + s = strconv.Itoa(int(value)) + } + return json.Marshal(s) +} diff --git a/vendor/github.com/gogo/protobuf/proto/message_set.go b/vendor/github.com/gogo/protobuf/proto/message_set.go new file mode 100644 index 0000000..fd982de --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/message_set.go @@ -0,0 +1,311 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Support for message sets. + */ + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "reflect" + "sort" +) + +// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. +// A message type ID is required for storing a protocol buffer in a message set. +var errNoMessageTypeID = errors.New("proto does not have a message type ID") + +// The first two types (_MessageSet_Item and messageSet) +// model what the protocol compiler produces for the following protocol message: +// message MessageSet { +// repeated group Item = 1 { +// required int32 type_id = 2; +// required string message = 3; +// }; +// } +// That is the MessageSet wire format. We can't use a proto to generate these +// because that would introduce a circular dependency between it and this package. + +type _MessageSet_Item struct { + TypeId *int32 `protobuf:"varint,2,req,name=type_id"` + Message []byte `protobuf:"bytes,3,req,name=message"` +} + +type messageSet struct { + Item []*_MessageSet_Item `protobuf:"group,1,rep"` + XXX_unrecognized []byte + // TODO: caching? +} + +// Make sure messageSet is a Message. +var _ Message = (*messageSet)(nil) + +// messageTypeIder is an interface satisfied by a protocol buffer type +// that may be stored in a MessageSet. 
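+// For example (a hypothetical sketch; whether generated code provides this
+// method depends on the generator):
+//
+//	func (*MyMessage) MessageTypeId() int32 { return 12345 }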
+type messageTypeIder interface { + MessageTypeId() int32 +} + +func (ms *messageSet) find(pb Message) *_MessageSet_Item { + mti, ok := pb.(messageTypeIder) + if !ok { + return nil + } + id := mti.MessageTypeId() + for _, item := range ms.Item { + if *item.TypeId == id { + return item + } + } + return nil +} + +func (ms *messageSet) Has(pb Message) bool { + if ms.find(pb) != nil { + return true + } + return false +} + +func (ms *messageSet) Unmarshal(pb Message) error { + if item := ms.find(pb); item != nil { + return Unmarshal(item.Message, pb) + } + if _, ok := pb.(messageTypeIder); !ok { + return errNoMessageTypeID + } + return nil // TODO: return error instead? +} + +func (ms *messageSet) Marshal(pb Message) error { + msg, err := Marshal(pb) + if err != nil { + return err + } + if item := ms.find(pb); item != nil { + // reuse existing item + item.Message = msg + return nil + } + + mti, ok := pb.(messageTypeIder) + if !ok { + return errNoMessageTypeID + } + + mtid := mti.MessageTypeId() + ms.Item = append(ms.Item, &_MessageSet_Item{ + TypeId: &mtid, + Message: msg, + }) + return nil +} + +func (ms *messageSet) Reset() { *ms = messageSet{} } +func (ms *messageSet) String() string { return CompactTextString(ms) } +func (*messageSet) ProtoMessage() {} + +// Support for the message_set_wire_format message option. + +func skipVarint(buf []byte) []byte { + i := 0 + for ; buf[i]&0x80 != 0; i++ { + } + return buf[i+1:] +} + +// MarshalMessageSet encodes the extension map represented by m in the message set wire format. +// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option. +func MarshalMessageSet(exts interface{}) ([]byte, error) { + var m map[int32]Extension + switch exts := exts.(type) { + case *XXX_InternalExtensions: + if err := encodeExtensions(exts); err != nil { + return nil, err + } + m, _ = exts.extensionsRead() + case map[int32]Extension: + if err := encodeExtensionsMap(exts); err != nil { + return nil, err + } + m = exts + default: + return nil, errors.New("proto: not an extension map") + } + + // Sort extension IDs to provide a deterministic encoding. + // See also enc_map in encode.go. + ids := make([]int, 0, len(m)) + for id := range m { + ids = append(ids, int(id)) + } + sort.Ints(ids) + + ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))} + for _, id := range ids { + e := m[int32(id)] + // Remove the wire type and field number varint, as well as the length varint. + msg := skipVarint(skipVarint(e.enc)) + + ms.Item = append(ms.Item, &_MessageSet_Item{ + TypeId: Int32(int32(id)), + Message: msg, + }) + } + return Marshal(ms) +} + +// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. +// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option. +func UnmarshalMessageSet(buf []byte, exts interface{}) error { + var m map[int32]Extension + switch exts := exts.(type) { + case *XXX_InternalExtensions: + m = exts.extensionsWrite() + case map[int32]Extension: + m = exts + default: + return errors.New("proto: not an extension map") + } + + ms := new(messageSet) + if err := Unmarshal(buf, ms); err != nil { + return err + } + for _, item := range ms.Item { + id := *item.TypeId + msg := item.Message + + // Restore wire type and field number varint, plus length varint. + // Be careful to preserve duplicate items. 
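+		// Re-frame each item as an ordinary field: tag (id<<3|WireBytes),
+		// then a length varint, then the payload, so it can live in the
+		// extension map in its normal encoded form.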
+ b := EncodeVarint(uint64(id)<<3 | WireBytes) + if ext, ok := m[id]; ok { + // Existing data; rip off the tag and length varint + // so we join the new data correctly. + // We can assume that ext.enc is set because we are unmarshaling. + o := ext.enc[len(b):] // skip wire type and field number + _, n := DecodeVarint(o) // calculate length of length varint + o = o[n:] // skip length varint + msg = append(o, msg...) // join old data and new data + } + b = append(b, EncodeVarint(uint64(len(msg)))...) + b = append(b, msg...) + + m[id] = Extension{enc: b} + } + return nil +} + +// MarshalMessageSetJSON encodes the extension map represented by m in JSON format. +// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option. +func MarshalMessageSetJSON(exts interface{}) ([]byte, error) { + var m map[int32]Extension + switch exts := exts.(type) { + case *XXX_InternalExtensions: + m, _ = exts.extensionsRead() + case map[int32]Extension: + m = exts + default: + return nil, errors.New("proto: not an extension map") + } + var b bytes.Buffer + b.WriteByte('{') + + // Process the map in key order for deterministic output. + ids := make([]int32, 0, len(m)) + for id := range m { + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) // int32Slice defined in text.go + + for i, id := range ids { + ext := m[id] + if i > 0 { + b.WriteByte(',') + } + + msd, ok := messageSetMap[id] + if !ok { + // Unknown type; we can't render it, so skip it. + continue + } + fmt.Fprintf(&b, `"[%s]":`, msd.name) + + x := ext.value + if x == nil { + x = reflect.New(msd.t.Elem()).Interface() + if err := Unmarshal(ext.enc, x.(Message)); err != nil { + return nil, err + } + } + d, err := json.Marshal(x) + if err != nil { + return nil, err + } + b.Write(d) + } + b.WriteByte('}') + return b.Bytes(), nil +} + +// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format. +// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option. +func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error { + // Common-case fast path. + if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) { + return nil + } + + // This is fairly tricky, and it's not clear that it is needed. + return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented") +} + +// A global registry of types that can be used in a MessageSet. + +var messageSetMap = make(map[int32]messageSetDesc) + +type messageSetDesc struct { + t reflect.Type // pointer to struct + name string +} + +// RegisterMessageSetType is called from the generated code. +func RegisterMessageSetType(m Message, fieldNum int32, name string) { + messageSetMap[fieldNum] = messageSetDesc{ + t: reflect.TypeOf(m), + name: name, + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go new file mode 100644 index 0000000..fb512e2 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go @@ -0,0 +1,484 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build appengine js + +// This file contains an implementation of proto field accesses using package reflect. +// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can +// be used on App Engine. + +package proto + +import ( + "math" + "reflect" +) + +// A structPointer is a pointer to a struct. +type structPointer struct { + v reflect.Value +} + +// toStructPointer returns a structPointer equivalent to the given reflect value. +// The reflect value must itself be a pointer to a struct. +func toStructPointer(v reflect.Value) structPointer { + return structPointer{v} +} + +// IsNil reports whether p is nil. +func structPointer_IsNil(p structPointer) bool { + return p.v.IsNil() +} + +// Interface returns the struct pointer as an interface value. +func structPointer_Interface(p structPointer, _ reflect.Type) interface{} { + return p.v.Interface() +} + +// A field identifies a field in a struct, accessible from a structPointer. +// In this implementation, a field is identified by the sequence of field indices +// passed to reflect's FieldByIndex. +type field []int + +// toField returns a field equivalent to the given reflect field. +func toField(f *reflect.StructField) field { + return f.Index +} + +// invalidField is an invalid field identifier. +var invalidField = field(nil) + +// IsValid reports whether the field identifier is valid. +func (f field) IsValid() bool { return f != nil } + +// field returns the given field in the struct as a reflect value. +func structPointer_field(p structPointer, f field) reflect.Value { + // Special case: an extension map entry with a value of type T + // passes a *T to the struct-handling code with a zero field, + // expecting that it will be treated as equivalent to *struct{ X T }, + // which has the same memory layout. We have to handle that case + // specially, because reflect will panic if we call FieldByIndex on a + // non-struct. + if f == nil { + return p.v.Elem() + } + + return p.v.Elem().FieldByIndex(f) +} + +// ifield returns the given field in the struct as an interface value. +func structPointer_ifield(p structPointer, f field) interface{} { + return structPointer_field(p, f).Addr().Interface() +} + +// Bytes returns the address of a []byte field in the struct. 
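+// Like the other accessors below, it goes through structPointer_ifield,
+// which returns field.Addr().Interface() for the caller to type-assert.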
+func structPointer_Bytes(p structPointer, f field) *[]byte { + return structPointer_ifield(p, f).(*[]byte) +} + +// BytesSlice returns the address of a [][]byte field in the struct. +func structPointer_BytesSlice(p structPointer, f field) *[][]byte { + return structPointer_ifield(p, f).(*[][]byte) +} + +// Bool returns the address of a *bool field in the struct. +func structPointer_Bool(p structPointer, f field) **bool { + return structPointer_ifield(p, f).(**bool) +} + +// BoolVal returns the address of a bool field in the struct. +func structPointer_BoolVal(p structPointer, f field) *bool { + return structPointer_ifield(p, f).(*bool) +} + +// BoolSlice returns the address of a []bool field in the struct. +func structPointer_BoolSlice(p structPointer, f field) *[]bool { + return structPointer_ifield(p, f).(*[]bool) +} + +// String returns the address of a *string field in the struct. +func structPointer_String(p structPointer, f field) **string { + return structPointer_ifield(p, f).(**string) +} + +// StringVal returns the address of a string field in the struct. +func structPointer_StringVal(p structPointer, f field) *string { + return structPointer_ifield(p, f).(*string) +} + +// StringSlice returns the address of a []string field in the struct. +func structPointer_StringSlice(p structPointer, f field) *[]string { + return structPointer_ifield(p, f).(*[]string) +} + +// Extensions returns the address of an extension map field in the struct. +func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions { + return structPointer_ifield(p, f).(*XXX_InternalExtensions) +} + +// ExtMap returns the address of an extension map field in the struct. +func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { + return structPointer_ifield(p, f).(*map[int32]Extension) +} + +// NewAt returns the reflect.Value for a pointer to a field in the struct. +func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { + return structPointer_field(p, f).Addr() +} + +// SetStructPointer writes a *struct field in the struct. +func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { + structPointer_field(p, f).Set(q.v) +} + +// GetStructPointer reads a *struct field in the struct. +func structPointer_GetStructPointer(p structPointer, f field) structPointer { + return structPointer{structPointer_field(p, f)} +} + +// StructPointerSlice the address of a []*struct field in the struct. +func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice { + return structPointerSlice{structPointer_field(p, f)} +} + +// A structPointerSlice represents the address of a slice of pointers to structs +// (themselves messages or groups). That is, v.Type() is *[]*struct{...}. +type structPointerSlice struct { + v reflect.Value +} + +func (p structPointerSlice) Len() int { return p.v.Len() } +func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} } +func (p structPointerSlice) Append(q structPointer) { + p.v.Set(reflect.Append(p.v, q.v)) +} + +var ( + int32Type = reflect.TypeOf(int32(0)) + uint32Type = reflect.TypeOf(uint32(0)) + float32Type = reflect.TypeOf(float32(0)) + int64Type = reflect.TypeOf(int64(0)) + uint64Type = reflect.TypeOf(uint64(0)) + float64Type = reflect.TypeOf(float64(0)) +) + +// A word32 represents a field of type *int32, *uint32, *float32, or *enum. +// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable. 
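+// word32_Set draws pointers from the Buffer's int32s/uint32s/float32s pools
+// (see below) so that decoding many small fields amortizes allocations.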
+type word32 struct { + v reflect.Value +} + +// IsNil reports whether p is nil. +func word32_IsNil(p word32) bool { + return p.v.IsNil() +} + +// Set sets p to point at a newly allocated word with bits set to x. +func word32_Set(p word32, o *Buffer, x uint32) { + t := p.v.Type().Elem() + switch t { + case int32Type: + if len(o.int32s) == 0 { + o.int32s = make([]int32, uint32PoolSize) + } + o.int32s[0] = int32(x) + p.v.Set(reflect.ValueOf(&o.int32s[0])) + o.int32s = o.int32s[1:] + return + case uint32Type: + if len(o.uint32s) == 0 { + o.uint32s = make([]uint32, uint32PoolSize) + } + o.uint32s[0] = x + p.v.Set(reflect.ValueOf(&o.uint32s[0])) + o.uint32s = o.uint32s[1:] + return + case float32Type: + if len(o.float32s) == 0 { + o.float32s = make([]float32, uint32PoolSize) + } + o.float32s[0] = math.Float32frombits(x) + p.v.Set(reflect.ValueOf(&o.float32s[0])) + o.float32s = o.float32s[1:] + return + } + + // must be enum + p.v.Set(reflect.New(t)) + p.v.Elem().SetInt(int64(int32(x))) +} + +// Get gets the bits pointed at by p, as a uint32. +func word32_Get(p word32) uint32 { + elem := p.v.Elem() + switch elem.Kind() { + case reflect.Int32: + return uint32(elem.Int()) + case reflect.Uint32: + return uint32(elem.Uint()) + case reflect.Float32: + return math.Float32bits(float32(elem.Float())) + } + panic("unreachable") +} + +// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct. +func structPointer_Word32(p structPointer, f field) word32 { + return word32{structPointer_field(p, f)} +} + +// A word32Val represents a field of type int32, uint32, float32, or enum. +// That is, v.Type() is int32, uint32, float32, or enum and v is assignable. +type word32Val struct { + v reflect.Value +} + +// Set sets *p to x. +func word32Val_Set(p word32Val, x uint32) { + switch p.v.Type() { + case int32Type: + p.v.SetInt(int64(x)) + return + case uint32Type: + p.v.SetUint(uint64(x)) + return + case float32Type: + p.v.SetFloat(float64(math.Float32frombits(x))) + return + } + + // must be enum + p.v.SetInt(int64(int32(x))) +} + +// Get gets the bits pointed at by p, as a uint32. +func word32Val_Get(p word32Val) uint32 { + elem := p.v + switch elem.Kind() { + case reflect.Int32: + return uint32(elem.Int()) + case reflect.Uint32: + return uint32(elem.Uint()) + case reflect.Float32: + return math.Float32bits(float32(elem.Float())) + } + panic("unreachable") +} + +// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct. +func structPointer_Word32Val(p structPointer, f field) word32Val { + return word32Val{structPointer_field(p, f)} +} + +// A word32Slice is a slice of 32-bit values. +// That is, v.Type() is []int32, []uint32, []float32, or []enum. 
+type word32Slice struct { + v reflect.Value +} + +func (p word32Slice) Append(x uint32) { + n, m := p.v.Len(), p.v.Cap() + if n < m { + p.v.SetLen(n + 1) + } else { + t := p.v.Type().Elem() + p.v.Set(reflect.Append(p.v, reflect.Zero(t))) + } + elem := p.v.Index(n) + switch elem.Kind() { + case reflect.Int32: + elem.SetInt(int64(int32(x))) + case reflect.Uint32: + elem.SetUint(uint64(x)) + case reflect.Float32: + elem.SetFloat(float64(math.Float32frombits(x))) + } +} + +func (p word32Slice) Len() int { + return p.v.Len() +} + +func (p word32Slice) Index(i int) uint32 { + elem := p.v.Index(i) + switch elem.Kind() { + case reflect.Int32: + return uint32(elem.Int()) + case reflect.Uint32: + return uint32(elem.Uint()) + case reflect.Float32: + return math.Float32bits(float32(elem.Float())) + } + panic("unreachable") +} + +// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct. +func structPointer_Word32Slice(p structPointer, f field) word32Slice { + return word32Slice{structPointer_field(p, f)} +} + +// word64 is like word32 but for 64-bit values. +type word64 struct { + v reflect.Value +} + +func word64_Set(p word64, o *Buffer, x uint64) { + t := p.v.Type().Elem() + switch t { + case int64Type: + if len(o.int64s) == 0 { + o.int64s = make([]int64, uint64PoolSize) + } + o.int64s[0] = int64(x) + p.v.Set(reflect.ValueOf(&o.int64s[0])) + o.int64s = o.int64s[1:] + return + case uint64Type: + if len(o.uint64s) == 0 { + o.uint64s = make([]uint64, uint64PoolSize) + } + o.uint64s[0] = x + p.v.Set(reflect.ValueOf(&o.uint64s[0])) + o.uint64s = o.uint64s[1:] + return + case float64Type: + if len(o.float64s) == 0 { + o.float64s = make([]float64, uint64PoolSize) + } + o.float64s[0] = math.Float64frombits(x) + p.v.Set(reflect.ValueOf(&o.float64s[0])) + o.float64s = o.float64s[1:] + return + } + panic("unreachable") +} + +func word64_IsNil(p word64) bool { + return p.v.IsNil() +} + +func word64_Get(p word64) uint64 { + elem := p.v.Elem() + switch elem.Kind() { + case reflect.Int64: + return uint64(elem.Int()) + case reflect.Uint64: + return elem.Uint() + case reflect.Float64: + return math.Float64bits(elem.Float()) + } + panic("unreachable") +} + +func structPointer_Word64(p structPointer, f field) word64 { + return word64{structPointer_field(p, f)} +} + +// word64Val is like word32Val but for 64-bit values. 
+type word64Val struct { + v reflect.Value +} + +func word64Val_Set(p word64Val, o *Buffer, x uint64) { + switch p.v.Type() { + case int64Type: + p.v.SetInt(int64(x)) + return + case uint64Type: + p.v.SetUint(x) + return + case float64Type: + p.v.SetFloat(math.Float64frombits(x)) + return + } + panic("unreachable") +} + +func word64Val_Get(p word64Val) uint64 { + elem := p.v + switch elem.Kind() { + case reflect.Int64: + return uint64(elem.Int()) + case reflect.Uint64: + return elem.Uint() + case reflect.Float64: + return math.Float64bits(elem.Float()) + } + panic("unreachable") +} + +func structPointer_Word64Val(p structPointer, f field) word64Val { + return word64Val{structPointer_field(p, f)} +} + +type word64Slice struct { + v reflect.Value +} + +func (p word64Slice) Append(x uint64) { + n, m := p.v.Len(), p.v.Cap() + if n < m { + p.v.SetLen(n + 1) + } else { + t := p.v.Type().Elem() + p.v.Set(reflect.Append(p.v, reflect.Zero(t))) + } + elem := p.v.Index(n) + switch elem.Kind() { + case reflect.Int64: + elem.SetInt(int64(int64(x))) + case reflect.Uint64: + elem.SetUint(uint64(x)) + case reflect.Float64: + elem.SetFloat(float64(math.Float64frombits(x))) + } +} + +func (p word64Slice) Len() int { + return p.v.Len() +} + +func (p word64Slice) Index(i int) uint64 { + elem := p.v.Index(i) + switch elem.Kind() { + case reflect.Int64: + return uint64(elem.Int()) + case reflect.Uint64: + return uint64(elem.Uint()) + case reflect.Float64: + return math.Float64bits(float64(elem.Float())) + } + panic("unreachable") +} + +func structPointer_Word64Slice(p structPointer, f field) word64Slice { + return word64Slice{structPointer_field(p, f)} +} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go new file mode 100644 index 0000000..1763a5f --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go @@ -0,0 +1,85 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2016, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
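A quick illustration of the pattern the reflect-based accessors above rely on. This is a standalone sketch, not part of the vendored files: the struct type "msg" and its fields are invented for the demo. A structPointer is just a reflect.Value holding a *struct, a field is the reflect index path, and the typed address falls out of FieldByIndex(...).Addr().Interface(), exactly as in structPointer_ifield.

package main

import (
	"fmt"
	"reflect"
)

type msg struct {
	Name  *string
	Count int32
}

func main() {
	m := &msg{}
	p := reflect.ValueOf(m) // plays the role of structPointer

	f, _ := reflect.TypeOf(msg{}).FieldByName("Name")
	idx := f.Index // plays the role of field ([]int)

	// Equivalent of structPointer_String(p, f): a **string for the field.
	sp := p.Elem().FieldByIndex(idx).Addr().Interface().(**string)
	s := "hello"
	*sp = &s
	fmt.Println(*m.Name) // hello
}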
+ +// +build appengine js + +package proto + +import ( + "reflect" +) + +func structPointer_FieldPointer(p structPointer, f field) structPointer { + panic("not implemented") +} + +func appendStructPointer(base structPointer, f field, typ reflect.Type) structPointer { + panic("not implemented") +} + +func structPointer_InterfaceAt(p structPointer, f field, t reflect.Type) interface{} { + panic("not implemented") +} + +func structPointer_InterfaceRef(p structPointer, f field, t reflect.Type) interface{} { + panic("not implemented") +} + +func structPointer_GetRefStructPointer(p structPointer, f field) structPointer { + panic("not implemented") +} + +func structPointer_Add(p structPointer, size field) structPointer { + panic("not implemented") +} + +func structPointer_Len(p structPointer, f field) int { + panic("not implemented") +} + +func structPointer_GetSliceHeader(p structPointer, f field) *reflect.SliceHeader { + panic("not implemented") +} + +func structPointer_Copy(oldptr structPointer, newptr structPointer, size int) { + panic("not implemented") +} + +func structPointer_StructRefSlice(p structPointer, f field, size uintptr) *structRefSlice { + panic("not implemented") +} + +type structRefSlice struct{} + +func (v *structRefSlice) Len() int { + panic("not implemented") +} + +func (v *structRefSlice) Index(i int) structPointer { + panic("not implemented") +} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go new file mode 100644 index 0000000..6b5567d --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go @@ -0,0 +1,270 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build !appengine,!js + +// This file contains the implementation of the proto field accesses using package unsafe. 
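The unsafe implementation that follows identifies a field purely by its byte offset (toField returns field(f.Offset)) and recovers a typed pointer with pointer arithmetic. A minimal sketch of that idea, not part of the vendored files; the "msg" type is invented for the demo, and modern Go would reach for unsafe.Add, but the arithmetic below mirrors the vendored code:

package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

type msg struct {
	A int32
	B string
}

func main() {
	m := &msg{A: 7, B: "x"}

	f, _ := reflect.TypeOf(msg{}).FieldByName("B")
	off := f.Offset // plays the role of field (a uintptr offset)

	// Equivalent of structPointer_StringVal: the *string at base+offset.
	bp := (*string)(unsafe.Pointer(uintptr(unsafe.Pointer(m)) + off))
	*bp = "patched"
	fmt.Println(m.B) // patched
}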
+ +package proto + +import ( + "reflect" + "unsafe" +) + +// NOTE: These type_Foo functions would more idiomatically be methods, +// but Go does not allow methods on pointer types, and we must preserve +// some pointer type for the garbage collector. We use these +// funcs with clunky names as our poor approximation to methods. +// +// An alternative would be +// type structPointer struct { p unsafe.Pointer } +// but that does not registerize as well. + +// A structPointer is a pointer to a struct. +type structPointer unsafe.Pointer + +// toStructPointer returns a structPointer equivalent to the given reflect value. +func toStructPointer(v reflect.Value) structPointer { + return structPointer(unsafe.Pointer(v.Pointer())) +} + +// IsNil reports whether p is nil. +func structPointer_IsNil(p structPointer) bool { + return p == nil +} + +// Interface returns the struct pointer, assumed to have element type t, +// as an interface value. +func structPointer_Interface(p structPointer, t reflect.Type) interface{} { + return reflect.NewAt(t, unsafe.Pointer(p)).Interface() +} + +// A field identifies a field in a struct, accessible from a structPointer. +// In this implementation, a field is identified by its byte offset from the start of the struct. +type field uintptr + +// toField returns a field equivalent to the given reflect field. +func toField(f *reflect.StructField) field { + return field(f.Offset) +} + +// invalidField is an invalid field identifier. +const invalidField = ^field(0) + +// IsValid reports whether the field identifier is valid. +func (f field) IsValid() bool { + return f != ^field(0) +} + +// Bytes returns the address of a []byte field in the struct. +func structPointer_Bytes(p structPointer, f field) *[]byte { + return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// BytesSlice returns the address of a [][]byte field in the struct. +func structPointer_BytesSlice(p structPointer, f field) *[][]byte { + return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// Bool returns the address of a *bool field in the struct. +func structPointer_Bool(p structPointer, f field) **bool { + return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// BoolVal returns the address of a bool field in the struct. +func structPointer_BoolVal(p structPointer, f field) *bool { + return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// BoolSlice returns the address of a []bool field in the struct. +func structPointer_BoolSlice(p structPointer, f field) *[]bool { + return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// String returns the address of a *string field in the struct. +func structPointer_String(p structPointer, f field) **string { + return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// StringVal returns the address of a string field in the struct. +func structPointer_StringVal(p structPointer, f field) *string { + return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// StringSlice returns the address of a []string field in the struct. +func structPointer_StringSlice(p structPointer, f field) *[]string { + return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// ExtMap returns the address of an extension map field in the struct. 
+func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions { + return (*XXX_InternalExtensions)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { + return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// NewAt returns the reflect.Value for a pointer to a field in the struct. +func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { + return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f))) +} + +// SetStructPointer writes a *struct field in the struct. +func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { + *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q +} + +// GetStructPointer reads a *struct field in the struct. +func structPointer_GetStructPointer(p structPointer, f field) structPointer { + return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// StructPointerSlice the address of a []*struct field in the struct. +func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice { + return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups). +type structPointerSlice []structPointer + +func (v *structPointerSlice) Len() int { return len(*v) } +func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] } +func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) } + +// A word32 is the address of a "pointer to 32-bit value" field. +type word32 **uint32 + +// IsNil reports whether *v is nil. +func word32_IsNil(p word32) bool { + return *p == nil +} + +// Set sets *v to point at a newly allocated word set to x. +func word32_Set(p word32, o *Buffer, x uint32) { + if len(o.uint32s) == 0 { + o.uint32s = make([]uint32, uint32PoolSize) + } + o.uint32s[0] = x + *p = &o.uint32s[0] + o.uint32s = o.uint32s[1:] +} + +// Get gets the value pointed at by *v. +func word32_Get(p word32) uint32 { + return **p +} + +// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct. +func structPointer_Word32(p structPointer, f field) word32 { + return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +// A word32Val is the address of a 32-bit value field. +type word32Val *uint32 + +// Set sets *p to x. +func word32Val_Set(p word32Val, x uint32) { + *p = x +} + +// Get gets the value pointed at by p. +func word32Val_Get(p word32Val) uint32 { + return *p +} + +// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct. +func structPointer_Word32Val(p structPointer, f field) word32Val { + return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +// A word32Slice is a slice of 32-bit values. +type word32Slice []uint32 + +func (v *word32Slice) Append(x uint32) { *v = append(*v, x) } +func (v *word32Slice) Len() int { return len(*v) } +func (v *word32Slice) Index(i int) uint32 { return (*v)[i] } + +// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct. +func structPointer_Word32Slice(p structPointer, f field) *word32Slice { + return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// word64 is like word32 but for 64-bit values. 
+type word64 **uint64 + +func word64_Set(p word64, o *Buffer, x uint64) { + if len(o.uint64s) == 0 { + o.uint64s = make([]uint64, uint64PoolSize) + } + o.uint64s[0] = x + *p = &o.uint64s[0] + o.uint64s = o.uint64s[1:] +} + +func word64_IsNil(p word64) bool { + return *p == nil +} + +func word64_Get(p word64) uint64 { + return **p +} + +func structPointer_Word64(p structPointer, f field) word64 { + return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +// word64Val is like word32Val but for 64-bit values. +type word64Val *uint64 + +func word64Val_Set(p word64Val, o *Buffer, x uint64) { + *p = x +} + +func word64Val_Get(p word64Val) uint64 { + return *p +} + +func structPointer_Word64Val(p structPointer, f field) word64Val { + return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +// word64Slice is like word32Slice but for 64-bit values. +type word64Slice []uint64 + +func (v *word64Slice) Append(x uint64) { *v = append(*v, x) } +func (v *word64Slice) Len() int { return len(*v) } +func (v *word64Slice) Index(i int) uint64 { return (*v)[i] } + +func structPointer_Word64Slice(p structPointer, f field) *word64Slice { + return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go new file mode 100644 index 0000000..f156a29 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go @@ -0,0 +1,128 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build !appengine,!js + +// This file contains the implementation of the proto field accesses using package unsafe. 
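The word32_Set / word64_Set helpers above avoid a heap allocation per optional scalar by carving pointers out of a pooled slice on the Buffer (o.uint32s, o.uint64s). A self-contained sketch of that allocation trick, not part of the vendored files; the pool size constant actually lives elsewhere in this package, and the "buffer"/"setUint32" names here are invented:

package main

import "fmt"

const uint32PoolSize = 8 // assumed; the real constant is defined in encode.go

type buffer struct {
	uint32s []uint32
}

// setUint32 mimics word32_Set: point *p at a freshly carved pool slot.
func (o *buffer) setUint32(p **uint32, x uint32) {
	if len(o.uint32s) == 0 {
		o.uint32s = make([]uint32, uint32PoolSize)
	}
	o.uint32s[0] = x
	*p = &o.uint32s[0]
	o.uint32s = o.uint32s[1:]
}

func main() {
	var o buffer
	var a, b *uint32
	o.setUint32(&a, 1)
	o.setUint32(&b, 2)
	fmt.Println(*a, *b) // 1 2 — two live pointers from a single make call
}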
+ +package proto + +import ( + "reflect" + "unsafe" +) + +func structPointer_InterfaceAt(p structPointer, f field, t reflect.Type) interface{} { + point := unsafe.Pointer(uintptr(p) + uintptr(f)) + r := reflect.NewAt(t, point) + return r.Interface() +} + +func structPointer_InterfaceRef(p structPointer, f field, t reflect.Type) interface{} { + point := unsafe.Pointer(uintptr(p) + uintptr(f)) + r := reflect.NewAt(t, point) + if r.Elem().IsNil() { + return nil + } + return r.Elem().Interface() +} + +func copyUintPtr(oldptr, newptr uintptr, size int) { + oldbytes := make([]byte, 0) + oldslice := (*reflect.SliceHeader)(unsafe.Pointer(&oldbytes)) + oldslice.Data = oldptr + oldslice.Len = size + oldslice.Cap = size + newbytes := make([]byte, 0) + newslice := (*reflect.SliceHeader)(unsafe.Pointer(&newbytes)) + newslice.Data = newptr + newslice.Len = size + newslice.Cap = size + copy(newbytes, oldbytes) +} + +func structPointer_Copy(oldptr structPointer, newptr structPointer, size int) { + copyUintPtr(uintptr(oldptr), uintptr(newptr), size) +} + +func appendStructPointer(base structPointer, f field, typ reflect.Type) structPointer { + size := typ.Elem().Size() + + oldHeader := structPointer_GetSliceHeader(base, f) + oldSlice := reflect.NewAt(typ, unsafe.Pointer(oldHeader)).Elem() + newLen := oldHeader.Len + 1 + newSlice := reflect.MakeSlice(typ, newLen, newLen) + reflect.Copy(newSlice, oldSlice) + bas := toStructPointer(newSlice) + oldHeader.Data = uintptr(bas) + oldHeader.Len = newLen + oldHeader.Cap = newLen + + return structPointer(unsafe.Pointer(uintptr(unsafe.Pointer(bas)) + uintptr(uintptr(newLen-1)*size))) +} + +func structPointer_FieldPointer(p structPointer, f field) structPointer { + return structPointer(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +func structPointer_GetRefStructPointer(p structPointer, f field) structPointer { + return structPointer((*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +func structPointer_GetSliceHeader(p structPointer, f field) *reflect.SliceHeader { + return (*reflect.SliceHeader)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +func structPointer_Add(p structPointer, size field) structPointer { + return structPointer(unsafe.Pointer(uintptr(p) + uintptr(size))) +} + +func structPointer_Len(p structPointer, f field) int { + return len(*(*[]interface{})(unsafe.Pointer(structPointer_GetRefStructPointer(p, f)))) +} + +func structPointer_StructRefSlice(p structPointer, f field, size uintptr) *structRefSlice { + return &structRefSlice{p: p, f: f, size: size} +} + +// A structRefSlice represents a slice of structs (themselves submessages or groups). +type structRefSlice struct { + p structPointer + f field + size uintptr +} + +func (v *structRefSlice) Len() int { + return structPointer_Len(v.p, v.f) +} + +func (v *structRefSlice) Index(i int) structPointer { + ss := structPointer_GetStructPointer(v.p, v.f) + ss1 := structPointer_GetRefStructPointer(ss, 0) + return structPointer_Add(ss1, field(uintptr(i)*v.size)) +} diff --git a/vendor/github.com/gogo/protobuf/proto/properties.go b/vendor/github.com/gogo/protobuf/proto/properties.go new file mode 100644 index 0000000..44b3320 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/properties.go @@ -0,0 +1,968 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for encoding data into the wire format for protocol buffers. + */ + +import ( + "fmt" + "log" + "os" + "reflect" + "sort" + "strconv" + "strings" + "sync" +) + +const debug bool = false + +// Constants that identify the encoding of a value on the wire. +const ( + WireVarint = 0 + WireFixed64 = 1 + WireBytes = 2 + WireStartGroup = 3 + WireEndGroup = 4 + WireFixed32 = 5 +) + +const startSize = 10 // initial slice/string sizes + +// Encoders are defined in encode.go +// An encoder outputs the full representation of a field, including its +// tag and encoder type. +type encoder func(p *Buffer, prop *Properties, base structPointer) error + +// A valueEncoder encodes a single integer in a particular encoding. +type valueEncoder func(o *Buffer, x uint64) error + +// Sizers are defined in encode.go +// A sizer returns the encoded size of a field, including its tag and encoder +// type. +type sizer func(prop *Properties, base structPointer) int + +// A valueSizer returns the encoded size of a single integer in a particular +// encoding. +type valueSizer func(x uint64) int + +// Decoders are defined in decode.go +// A decoder creates a value from its wire representation. +// Unrecognized subelements are saved in unrec. +type decoder func(p *Buffer, prop *Properties, base structPointer) error + +// A valueDecoder decodes a single integer in a particular encoding. +type valueDecoder func(o *Buffer) (x uint64, err error) + +// A oneofMarshaler does the marshaling for all oneof fields in a message. +type oneofMarshaler func(Message, *Buffer) error + +// A oneofUnmarshaler does the unmarshaling for a oneof field in a message. +type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error) + +// A oneofSizer does the sizing for all oneof fields in a message. +type oneofSizer func(Message) int + +// tagMap is an optimization over map[int]int for typical protocol buffer +// use-cases. 
Encoded protocol buffers are often in tag order with small tag +// numbers. +type tagMap struct { + fastTags []int + slowTags map[int]int +} + +// tagMapFastLimit is the upper bound on the tag number that will be stored in +// the tagMap slice rather than its map. +const tagMapFastLimit = 1024 + +func (p *tagMap) get(t int) (int, bool) { + if t > 0 && t < tagMapFastLimit { + if t >= len(p.fastTags) { + return 0, false + } + fi := p.fastTags[t] + return fi, fi >= 0 + } + fi, ok := p.slowTags[t] + return fi, ok +} + +func (p *tagMap) put(t int, fi int) { + if t > 0 && t < tagMapFastLimit { + for len(p.fastTags) < t+1 { + p.fastTags = append(p.fastTags, -1) + } + p.fastTags[t] = fi + return + } + if p.slowTags == nil { + p.slowTags = make(map[int]int) + } + p.slowTags[t] = fi +} + +// StructProperties represents properties for all the fields of a struct. +// decoderTags and decoderOrigNames should only be used by the decoder. +type StructProperties struct { + Prop []*Properties // properties for each field + reqCount int // required count + decoderTags tagMap // map from proto tag to struct field number + decoderOrigNames map[string]int // map from original name to struct field number + order []int // list of struct field numbers in tag order + unrecField field // field id of the XXX_unrecognized []byte field + extendable bool // is this an extendable proto + + oneofMarshaler oneofMarshaler + oneofUnmarshaler oneofUnmarshaler + oneofSizer oneofSizer + stype reflect.Type + + // OneofTypes contains information about the oneof fields in this message. + // It is keyed by the original name of a field. + OneofTypes map[string]*OneofProperties +} + +// OneofProperties represents information about a specific field in a oneof. +type OneofProperties struct { + Type reflect.Type // pointer to generated struct type for this oneof field + Field int // struct field number of the containing oneof in the message + Prop *Properties +} + +// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. +// See encode.go, (*Buffer).enc_struct. + +func (sp *StructProperties) Len() int { return len(sp.order) } +func (sp *StructProperties) Less(i, j int) bool { + return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag +} +func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } + +// Properties represents the protocol-specific behavior of a single struct field. 
+type Properties struct { + Name string // name of the field, for error messages + OrigName string // original name before protocol compiler (always set) + JSONName string // name to use for JSON; determined by protoc + Wire string + WireType int + Tag int + Required bool + Optional bool + Repeated bool + Packed bool // relevant for repeated primitives only + Enum string // set for enum types only + proto3 bool // whether this is known to be a proto3 field; set for []byte only + oneof bool // whether this is a oneof field + + Default string // default value + HasDefault bool // whether an explicit default was provided + CustomType string + StdTime bool + StdDuration bool + + enc encoder + valEnc valueEncoder // set for bool and numeric types only + field field + tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType) + tagbuf [8]byte + stype reflect.Type // set for struct types only + sstype reflect.Type // set for slices of structs types only + ctype reflect.Type // set for custom types only + sprop *StructProperties // set for struct types only + isMarshaler bool + isUnmarshaler bool + + mtype reflect.Type // set for map types only + mkeyprop *Properties // set for map types only + mvalprop *Properties // set for map types only + + size sizer + valSize valueSizer // set for bool and numeric types only + + dec decoder + valDec valueDecoder // set for bool and numeric types only + + // If this is a packable field, this will be the decoder for the packed version of the field. + packedDec decoder +} + +// String formats the properties in the protobuf struct field tag style. +func (p *Properties) String() string { + s := p.Wire + s = "," + s += strconv.Itoa(p.Tag) + if p.Required { + s += ",req" + } + if p.Optional { + s += ",opt" + } + if p.Repeated { + s += ",rep" + } + if p.Packed { + s += ",packed" + } + s += ",name=" + p.OrigName + if p.JSONName != p.OrigName { + s += ",json=" + p.JSONName + } + if p.proto3 { + s += ",proto3" + } + if p.oneof { + s += ",oneof" + } + if len(p.Enum) > 0 { + s += ",enum=" + p.Enum + } + if p.HasDefault { + s += ",def=" + p.Default + } + return s +} + +// Parse populates p by parsing a string in the protobuf struct field tag style. +func (p *Properties) Parse(s string) { + // "bytes,49,opt,name=foo,def=hello!" + fields := strings.Split(s, ",") // breaks def=, but handled below. 
+ if len(fields) < 2 { + fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s) + return + } + + p.Wire = fields[0] + switch p.Wire { + case "varint": + p.WireType = WireVarint + p.valEnc = (*Buffer).EncodeVarint + p.valDec = (*Buffer).DecodeVarint + p.valSize = sizeVarint + case "fixed32": + p.WireType = WireFixed32 + p.valEnc = (*Buffer).EncodeFixed32 + p.valDec = (*Buffer).DecodeFixed32 + p.valSize = sizeFixed32 + case "fixed64": + p.WireType = WireFixed64 + p.valEnc = (*Buffer).EncodeFixed64 + p.valDec = (*Buffer).DecodeFixed64 + p.valSize = sizeFixed64 + case "zigzag32": + p.WireType = WireVarint + p.valEnc = (*Buffer).EncodeZigzag32 + p.valDec = (*Buffer).DecodeZigzag32 + p.valSize = sizeZigzag32 + case "zigzag64": + p.WireType = WireVarint + p.valEnc = (*Buffer).EncodeZigzag64 + p.valDec = (*Buffer).DecodeZigzag64 + p.valSize = sizeZigzag64 + case "bytes", "group": + p.WireType = WireBytes + // no numeric converter for non-numeric types + default: + fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s) + return + } + + var err error + p.Tag, err = strconv.Atoi(fields[1]) + if err != nil { + return + } + + for i := 2; i < len(fields); i++ { + f := fields[i] + switch { + case f == "req": + p.Required = true + case f == "opt": + p.Optional = true + case f == "rep": + p.Repeated = true + case f == "packed": + p.Packed = true + case strings.HasPrefix(f, "name="): + p.OrigName = f[5:] + case strings.HasPrefix(f, "json="): + p.JSONName = f[5:] + case strings.HasPrefix(f, "enum="): + p.Enum = f[5:] + case f == "proto3": + p.proto3 = true + case f == "oneof": + p.oneof = true + case strings.HasPrefix(f, "def="): + p.HasDefault = true + p.Default = f[4:] // rest of string + if i+1 < len(fields) { + // Commas aren't escaped, and def is always last. + p.Default += "," + strings.Join(fields[i+1:], ",") + break + } + case strings.HasPrefix(f, "embedded="): + p.OrigName = strings.Split(f, "=")[1] + case strings.HasPrefix(f, "customtype="): + p.CustomType = strings.Split(f, "=")[1] + case f == "stdtime": + p.StdTime = true + case f == "stdduration": + p.StdDuration = true + } + } +} + +func logNoSliceEnc(t1, t2 reflect.Type) { + fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2) +} + +var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() + +// Initialize the fields for encoding and decoding. 
+func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { + p.enc = nil + p.dec = nil + p.size = nil + isMap := typ.Kind() == reflect.Map + if len(p.CustomType) > 0 && !isMap { + p.setCustomEncAndDec(typ) + p.setTag(lockGetProp) + return + } + if p.StdTime && !isMap { + p.setTimeEncAndDec(typ) + p.setTag(lockGetProp) + return + } + if p.StdDuration && !isMap { + p.setDurationEncAndDec(typ) + p.setTag(lockGetProp) + return + } + switch t1 := typ; t1.Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1) + + // proto3 scalar types + + case reflect.Bool: + if p.proto3 { + p.enc = (*Buffer).enc_proto3_bool + p.dec = (*Buffer).dec_proto3_bool + p.size = size_proto3_bool + } else { + p.enc = (*Buffer).enc_ref_bool + p.dec = (*Buffer).dec_proto3_bool + p.size = size_ref_bool + } + case reflect.Int32: + if p.proto3 { + p.enc = (*Buffer).enc_proto3_int32 + p.dec = (*Buffer).dec_proto3_int32 + p.size = size_proto3_int32 + } else { + p.enc = (*Buffer).enc_ref_int32 + p.dec = (*Buffer).dec_proto3_int32 + p.size = size_ref_int32 + } + case reflect.Uint32: + if p.proto3 { + p.enc = (*Buffer).enc_proto3_uint32 + p.dec = (*Buffer).dec_proto3_int32 // can reuse + p.size = size_proto3_uint32 + } else { + p.enc = (*Buffer).enc_ref_uint32 + p.dec = (*Buffer).dec_proto3_int32 // can reuse + p.size = size_ref_uint32 + } + case reflect.Int64, reflect.Uint64: + if p.proto3 { + p.enc = (*Buffer).enc_proto3_int64 + p.dec = (*Buffer).dec_proto3_int64 + p.size = size_proto3_int64 + } else { + p.enc = (*Buffer).enc_ref_int64 + p.dec = (*Buffer).dec_proto3_int64 + p.size = size_ref_int64 + } + case reflect.Float32: + if p.proto3 { + p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits + p.dec = (*Buffer).dec_proto3_int32 + p.size = size_proto3_uint32 + } else { + p.enc = (*Buffer).enc_ref_uint32 // can just treat them as bits + p.dec = (*Buffer).dec_proto3_int32 + p.size = size_ref_uint32 + } + case reflect.Float64: + if p.proto3 { + p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits + p.dec = (*Buffer).dec_proto3_int64 + p.size = size_proto3_int64 + } else { + p.enc = (*Buffer).enc_ref_int64 // can just treat them as bits + p.dec = (*Buffer).dec_proto3_int64 + p.size = size_ref_int64 + } + case reflect.String: + if p.proto3 { + p.enc = (*Buffer).enc_proto3_string + p.dec = (*Buffer).dec_proto3_string + p.size = size_proto3_string + } else { + p.enc = (*Buffer).enc_ref_string + p.dec = (*Buffer).dec_proto3_string + p.size = size_ref_string + } + case reflect.Struct: + p.stype = typ + p.isMarshaler = isMarshaler(typ) + p.isUnmarshaler = isUnmarshaler(typ) + if p.Wire == "bytes" { + p.enc = (*Buffer).enc_ref_struct_message + p.dec = (*Buffer).dec_ref_struct_message + p.size = size_ref_struct_message + } else { + fmt.Fprintf(os.Stderr, "proto: no coders for struct %T\n", typ) + } + + case reflect.Ptr: + switch t2 := t1.Elem(); t2.Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2) + break + case reflect.Bool: + p.enc = (*Buffer).enc_bool + p.dec = (*Buffer).dec_bool + p.size = size_bool + case reflect.Int32: + p.enc = (*Buffer).enc_int32 + p.dec = (*Buffer).dec_int32 + p.size = size_int32 + case reflect.Uint32: + p.enc = (*Buffer).enc_uint32 + p.dec = (*Buffer).dec_int32 // can reuse + p.size = size_uint32 + case reflect.Int64, reflect.Uint64: + p.enc = (*Buffer).enc_int64 + p.dec = (*Buffer).dec_int64 + p.size = size_int64 + case reflect.Float32: + p.enc = (*Buffer).enc_uint32 // can 
just treat them as bits + p.dec = (*Buffer).dec_int32 + p.size = size_uint32 + case reflect.Float64: + p.enc = (*Buffer).enc_int64 // can just treat them as bits + p.dec = (*Buffer).dec_int64 + p.size = size_int64 + case reflect.String: + p.enc = (*Buffer).enc_string + p.dec = (*Buffer).dec_string + p.size = size_string + case reflect.Struct: + p.stype = t1.Elem() + p.isMarshaler = isMarshaler(t1) + p.isUnmarshaler = isUnmarshaler(t1) + if p.Wire == "bytes" { + p.enc = (*Buffer).enc_struct_message + p.dec = (*Buffer).dec_struct_message + p.size = size_struct_message + } else { + p.enc = (*Buffer).enc_struct_group + p.dec = (*Buffer).dec_struct_group + p.size = size_struct_group + } + } + + case reflect.Slice: + switch t2 := t1.Elem(); t2.Kind() { + default: + logNoSliceEnc(t1, t2) + break + case reflect.Bool: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_bool + p.size = size_slice_packed_bool + } else { + p.enc = (*Buffer).enc_slice_bool + p.size = size_slice_bool + } + p.dec = (*Buffer).dec_slice_bool + p.packedDec = (*Buffer).dec_slice_packed_bool + case reflect.Int32: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_int32 + p.size = size_slice_packed_int32 + } else { + p.enc = (*Buffer).enc_slice_int32 + p.size = size_slice_int32 + } + p.dec = (*Buffer).dec_slice_int32 + p.packedDec = (*Buffer).dec_slice_packed_int32 + case reflect.Uint32: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_uint32 + p.size = size_slice_packed_uint32 + } else { + p.enc = (*Buffer).enc_slice_uint32 + p.size = size_slice_uint32 + } + p.dec = (*Buffer).dec_slice_int32 + p.packedDec = (*Buffer).dec_slice_packed_int32 + case reflect.Int64, reflect.Uint64: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_int64 + p.size = size_slice_packed_int64 + } else { + p.enc = (*Buffer).enc_slice_int64 + p.size = size_slice_int64 + } + p.dec = (*Buffer).dec_slice_int64 + p.packedDec = (*Buffer).dec_slice_packed_int64 + case reflect.Uint8: + p.dec = (*Buffer).dec_slice_byte + if p.proto3 { + p.enc = (*Buffer).enc_proto3_slice_byte + p.size = size_proto3_slice_byte + } else { + p.enc = (*Buffer).enc_slice_byte + p.size = size_slice_byte + } + case reflect.Float32, reflect.Float64: + switch t2.Bits() { + case 32: + // can just treat them as bits + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_uint32 + p.size = size_slice_packed_uint32 + } else { + p.enc = (*Buffer).enc_slice_uint32 + p.size = size_slice_uint32 + } + p.dec = (*Buffer).dec_slice_int32 + p.packedDec = (*Buffer).dec_slice_packed_int32 + case 64: + // can just treat them as bits + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_int64 + p.size = size_slice_packed_int64 + } else { + p.enc = (*Buffer).enc_slice_int64 + p.size = size_slice_int64 + } + p.dec = (*Buffer).dec_slice_int64 + p.packedDec = (*Buffer).dec_slice_packed_int64 + default: + logNoSliceEnc(t1, t2) + break + } + case reflect.String: + p.enc = (*Buffer).enc_slice_string + p.dec = (*Buffer).dec_slice_string + p.size = size_slice_string + case reflect.Ptr: + switch t3 := t2.Elem(); t3.Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3) + break + case reflect.Struct: + p.stype = t2.Elem() + p.isMarshaler = isMarshaler(t2) + p.isUnmarshaler = isUnmarshaler(t2) + if p.Wire == "bytes" { + p.enc = (*Buffer).enc_slice_struct_message + p.dec = (*Buffer).dec_slice_struct_message + p.size = size_slice_struct_message + } else { + p.enc = (*Buffer).enc_slice_struct_group + p.dec = (*Buffer).dec_slice_struct_group + p.size = 
size_slice_struct_group + } + } + case reflect.Slice: + switch t2.Elem().Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem()) + break + case reflect.Uint8: + p.enc = (*Buffer).enc_slice_slice_byte + p.dec = (*Buffer).dec_slice_slice_byte + p.size = size_slice_slice_byte + } + case reflect.Struct: + p.setSliceOfNonPointerStructs(t1) + } + + case reflect.Map: + p.enc = (*Buffer).enc_new_map + p.dec = (*Buffer).dec_new_map + p.size = size_new_map + + p.mtype = t1 + p.mkeyprop = &Properties{} + p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) + p.mvalprop = &Properties{} + vtype := p.mtype.Elem() + if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { + // The value type is not a message (*T) or bytes ([]byte), + // so we need encoders for the pointer to this type. + vtype = reflect.PtrTo(vtype) + } + + p.mvalprop.CustomType = p.CustomType + p.mvalprop.StdDuration = p.StdDuration + p.mvalprop.StdTime = p.StdTime + p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) + } + p.setTag(lockGetProp) +} + +func (p *Properties) setTag(lockGetProp bool) { + // precalculate tag code + wire := p.WireType + if p.Packed { + wire = WireBytes + } + x := uint32(p.Tag)<<3 | uint32(wire) + i := 0 + for i = 0; x > 127; i++ { + p.tagbuf[i] = 0x80 | uint8(x&0x7F) + x >>= 7 + } + p.tagbuf[i] = uint8(x) + p.tagcode = p.tagbuf[0 : i+1] + + if p.stype != nil { + if lockGetProp { + p.sprop = GetProperties(p.stype) + } else { + p.sprop = getPropertiesLocked(p.stype) + } + } +} + +var ( + marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() + unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() +) + +// isMarshaler reports whether type t implements Marshaler. +func isMarshaler(t reflect.Type) bool { + return t.Implements(marshalerType) +} + +// isUnmarshaler reports whether type t implements Unmarshaler. +func isUnmarshaler(t reflect.Type) bool { + return t.Implements(unmarshalerType) +} + +// Init populates the properties from a protocol buffer struct tag. +func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { + p.init(typ, name, tag, f, true) +} + +func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { + // "bytes,49,opt,def=hello!" + p.Name = name + p.OrigName = name + if f != nil { + p.field = toField(f) + } + if tag == "" { + return + } + p.Parse(tag) + p.setEncAndDec(typ, f, lockGetProp) +} + +var ( + propertiesMu sync.RWMutex + propertiesMap = make(map[reflect.Type]*StructProperties) +) + +// GetProperties returns the list of properties for the type represented by t. +// t must represent a generated struct type of a protocol message. +func GetProperties(t reflect.Type) *StructProperties { + if t.Kind() != reflect.Struct { + panic("proto: type must have kind struct") + } + + // Most calls to GetProperties in a long-running program will be + // retrieving details for types we have seen before. + propertiesMu.RLock() + sprop, ok := propertiesMap[t] + propertiesMu.RUnlock() + if ok { + if collectStats { + stats.Chit++ + } + return sprop + } + + propertiesMu.Lock() + sprop = getPropertiesLocked(t) + propertiesMu.Unlock() + return sprop +} + +// getPropertiesLocked requires that propertiesMu is held. 
+func getPropertiesLocked(t reflect.Type) *StructProperties { + if prop, ok := propertiesMap[t]; ok { + if collectStats { + stats.Chit++ + } + return prop + } + if collectStats { + stats.Cmiss++ + } + + prop := new(StructProperties) + // in case of recursive protos, fill this in now. + propertiesMap[t] = prop + + // build properties + prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) || + reflect.PtrTo(t).Implements(extendableProtoV1Type) || + reflect.PtrTo(t).Implements(extendableBytesType) + prop.unrecField = invalidField + prop.Prop = make([]*Properties, t.NumField()) + prop.order = make([]int, t.NumField()) + + isOneofMessage := false + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + p := new(Properties) + name := f.Name + p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) + + if f.Name == "XXX_InternalExtensions" { // special case + p.enc = (*Buffer).enc_exts + p.dec = nil // not needed + p.size = size_exts + } else if f.Name == "XXX_extensions" { // special case + if len(f.Tag.Get("protobuf")) > 0 { + p.enc = (*Buffer).enc_ext_slice_byte + p.dec = nil // not needed + p.size = size_ext_slice_byte + } else { + p.enc = (*Buffer).enc_map + p.dec = nil // not needed + p.size = size_map + } + } else if f.Name == "XXX_unrecognized" { // special case + prop.unrecField = toField(&f) + } + oneof := f.Tag.Get("protobuf_oneof") // special case + if oneof != "" { + isOneofMessage = true + // Oneof fields don't use the traditional protobuf tag. + p.OrigName = oneof + } + prop.Prop[i] = p + prop.order[i] = i + if debug { + print(i, " ", f.Name, " ", t.String(), " ") + if p.Tag > 0 { + print(p.String()) + } + print("\n") + } + if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && oneof == "" { + fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]") + } + } + + // Re-order prop.order. + sort.Sort(prop) + + type oneofMessage interface { + XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) + } + if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); isOneofMessage && ok { + var oots []interface{} + prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs() + prop.stype = t + + // Interpret oneof metadata. + prop.OneofTypes = make(map[string]*OneofProperties) + for _, oot := range oots { + oop := &OneofProperties{ + Type: reflect.ValueOf(oot).Type(), // *T + Prop: new(Properties), + } + sft := oop.Type.Elem().Field(0) + oop.Prop.Name = sft.Name + oop.Prop.Parse(sft.Tag.Get("protobuf")) + // There will be exactly one interface field that + // this new value is assignable to. + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if f.Type.Kind() != reflect.Interface { + continue + } + if !oop.Type.AssignableTo(f.Type) { + continue + } + oop.Field = i + break + } + prop.OneofTypes[oop.Prop.OrigName] = oop + } + } + + // build required counts + // build tags + reqCount := 0 + prop.decoderOrigNames = make(map[string]int) + for i, p := range prop.Prop { + if strings.HasPrefix(p.Name, "XXX_") { + // Internal fields should not appear in tags/origNames maps. + // They are handled specially when encoding and decoding. + continue + } + if p.Required { + reqCount++ + } + prop.decoderTags.put(p.Tag, i) + prop.decoderOrigNames[p.OrigName] = i + } + prop.reqCount = reqCount + + return prop +} + +// Return the Properties object for the x[0]'th field of the structure. 
+func propByIndex(t reflect.Type, x []int) *Properties { + if len(x) != 1 { + fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t) + return nil + } + prop := GetProperties(t) + return prop.Prop[x[0]] +} + +// Get the address and type of a pointer to a struct from an interface. +func getbase(pb Message) (t reflect.Type, b structPointer, err error) { + if pb == nil { + err = ErrNil + return + } + // get the reflect type of the pointer to the struct. + t = reflect.TypeOf(pb) + // get the address of the struct. + value := reflect.ValueOf(pb) + b = toStructPointer(value) + return +} + +// A global registry of enum types. +// The generated code will register the generated maps by calling RegisterEnum. + +var enumValueMaps = make(map[string]map[string]int32) +var enumStringMaps = make(map[string]map[int32]string) + +// RegisterEnum is called from the generated code to install the enum descriptor +// maps into the global table to aid parsing text format protocol buffers. +func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { + if _, ok := enumValueMaps[typeName]; ok { + panic("proto: duplicate enum registered: " + typeName) + } + enumValueMaps[typeName] = valueMap + if _, ok := enumStringMaps[typeName]; ok { + panic("proto: duplicate enum registered: " + typeName) + } + enumStringMaps[typeName] = unusedNameMap +} + +// EnumValueMap returns the mapping from names to integers of the +// enum type enumType, or a nil if not found. +func EnumValueMap(enumType string) map[string]int32 { + return enumValueMaps[enumType] +} + +// A registry of all linked message types. +// The string is a fully-qualified proto name ("pkg.Message"). +var ( + protoTypes = make(map[string]reflect.Type) + revProtoTypes = make(map[reflect.Type]string) +) + +// RegisterType is called from generated code and maps from the fully qualified +// proto name to the type (pointer to struct) of the protocol buffer. +func RegisterType(x Message, name string) { + if _, ok := protoTypes[name]; ok { + // TODO: Some day, make this a panic. + log.Printf("proto: duplicate proto type registered: %s", name) + return + } + t := reflect.TypeOf(x) + protoTypes[name] = t + revProtoTypes[t] = name +} + +// MessageName returns the fully-qualified proto name for the given message type. +func MessageName(x Message) string { + type xname interface { + XXX_MessageName() string + } + if m, ok := x.(xname); ok { + return m.XXX_MessageName() + } + return revProtoTypes[reflect.TypeOf(x)] +} + +// MessageType returns the message type (pointer to struct) for a named message. +func MessageType(name string) reflect.Type { return protoTypes[name] } + +// A registry of all linked proto files. +var ( + protoFiles = make(map[string][]byte) // file name => fileDescriptor +) + +// RegisterFile is called from generated code and maps from the +// full file name of a .proto file to its compressed FileDescriptorProto. +func RegisterFile(filename string, fileDescriptor []byte) { + protoFiles[filename] = fileDescriptor +} + +// FileDescriptor returns the compressed FileDescriptorProto for a .proto file. 
+func FileDescriptor(filename string) []byte { return protoFiles[filename] } diff --git a/vendor/github.com/gogo/protobuf/proto/properties_gogo.go b/vendor/github.com/gogo/protobuf/proto/properties_gogo.go new file mode 100644 index 0000000..b6b7176 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/properties_gogo.go @@ -0,0 +1,111 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
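Properties.Parse above consumes struct tags of the form "wiretype,tagnumber,opts...": the wire encoding comes first, then the field number, then flags like opt/rep/packed and key=value options such as name= and def=. A small sketch of reading such a tag, not part of the vendored files; the "example" type and its tag values are invented for the demo:

package main

import (
	"fmt"
	"reflect"
	"strconv"
	"strings"
)

type example struct {
	Name string `protobuf:"bytes,1,opt,name=name,json=jsonName"`
}

func main() {
	f, _ := reflect.TypeOf(example{}).FieldByName("Name")
	tag := f.Tag.Get("protobuf")

	// Mirrors the start of Properties.Parse: wire type, then tag number,
	// then the remaining comma-separated options.
	fields := strings.Split(tag, ",")
	wire := fields[0]
	num, _ := strconv.Atoi(fields[1])
	fmt.Printf("wire=%s tag=%d opts=%v\n", wire, num, fields[2:])
	// wire=bytes tag=1 opts=[opt name=name json=jsonName]
}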
+ +package proto + +import ( + "fmt" + "os" + "reflect" +) + +func (p *Properties) setCustomEncAndDec(typ reflect.Type) { + p.ctype = typ + if p.Repeated { + p.enc = (*Buffer).enc_custom_slice_bytes + p.dec = (*Buffer).dec_custom_slice_bytes + p.size = size_custom_slice_bytes + } else if typ.Kind() == reflect.Ptr { + p.enc = (*Buffer).enc_custom_bytes + p.dec = (*Buffer).dec_custom_bytes + p.size = size_custom_bytes + } else { + p.enc = (*Buffer).enc_custom_ref_bytes + p.dec = (*Buffer).dec_custom_ref_bytes + p.size = size_custom_ref_bytes + } +} + +func (p *Properties) setDurationEncAndDec(typ reflect.Type) { + if p.Repeated { + if typ.Elem().Kind() == reflect.Ptr { + p.enc = (*Buffer).enc_slice_duration + p.dec = (*Buffer).dec_slice_duration + p.size = size_slice_duration + } else { + p.enc = (*Buffer).enc_slice_ref_duration + p.dec = (*Buffer).dec_slice_ref_duration + p.size = size_slice_ref_duration + } + } else if typ.Kind() == reflect.Ptr { + p.enc = (*Buffer).enc_duration + p.dec = (*Buffer).dec_duration + p.size = size_duration + } else { + p.enc = (*Buffer).enc_ref_duration + p.dec = (*Buffer).dec_ref_duration + p.size = size_ref_duration + } +} + +func (p *Properties) setTimeEncAndDec(typ reflect.Type) { + if p.Repeated { + if typ.Elem().Kind() == reflect.Ptr { + p.enc = (*Buffer).enc_slice_time + p.dec = (*Buffer).dec_slice_time + p.size = size_slice_time + } else { + p.enc = (*Buffer).enc_slice_ref_time + p.dec = (*Buffer).dec_slice_ref_time + p.size = size_slice_ref_time + } + } else if typ.Kind() == reflect.Ptr { + p.enc = (*Buffer).enc_time + p.dec = (*Buffer).dec_time + p.size = size_time + } else { + p.enc = (*Buffer).enc_ref_time + p.dec = (*Buffer).dec_ref_time + p.size = size_ref_time + } + +} + +func (p *Properties) setSliceOfNonPointerStructs(typ reflect.Type) { + t2 := typ.Elem() + p.sstype = typ + p.stype = t2 + p.isMarshaler = isMarshaler(t2) + p.isUnmarshaler = isUnmarshaler(t2) + p.enc = (*Buffer).enc_slice_ref_struct_message + p.dec = (*Buffer).dec_slice_ref_struct_message + p.size = size_slice_ref_struct_message + if p.Wire != "bytes" { + fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T \n", typ, t2) + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/skip_gogo.go b/vendor/github.com/gogo/protobuf/proto/skip_gogo.go new file mode 100644 index 0000000..5a5fd93 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/skip_gogo.go @@ -0,0 +1,119 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "fmt" + "io" +) + +func Skip(data []byte) (n int, err error) { + l := len(data) + index := 0 + for index < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[index] + index++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + index++ + if data[index-1] < 0x80 { + break + } + } + return index, nil + case 1: + index += 8 + return index, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[index] + index++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + index += length + return index, nil + case 3: + for { + var innerWire uint64 + var start int = index + for shift := uint(0); ; shift += 7 { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[index] + index++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := Skip(data[start:]) + if err != nil { + return 0, err + } + index = start + next + } + return index, nil + case 4: + return index, nil + case 5: + index += 4 + return index, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} diff --git a/vendor/github.com/gogo/protobuf/proto/text.go b/vendor/github.com/gogo/protobuf/proto/text.go new file mode 100644 index 0000000..952f977 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/text.go @@ -0,0 +1,928 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// Functions for writing the text protocol buffer format. + +import ( + "bufio" + "bytes" + "encoding" + "errors" + "fmt" + "io" + "log" + "math" + "reflect" + "sort" + "strings" + "sync" + "time" +) + +var ( + newline = []byte("\n") + spaces = []byte(" ") + gtNewline = []byte(">\n") + endBraceNewline = []byte("}\n") + backslashN = []byte{'\\', 'n'} + backslashR = []byte{'\\', 'r'} + backslashT = []byte{'\\', 't'} + backslashDQ = []byte{'\\', '"'} + backslashBS = []byte{'\\', '\\'} + posInf = []byte("inf") + negInf = []byte("-inf") + nan = []byte("nan") +) + +type writer interface { + io.Writer + WriteByte(byte) error +} + +// textWriter is an io.Writer that tracks its indentation level. +type textWriter struct { + ind int + complete bool // if the current position is a complete line + compact bool // whether to write out as a one-liner + w writer +} + +func (w *textWriter) WriteString(s string) (n int, err error) { + if !strings.Contains(s, "\n") { + if !w.compact && w.complete { + w.writeIndent() + } + w.complete = false + return io.WriteString(w.w, s) + } + // WriteString is typically called without newlines, so this + // codepath and its copy are rare. We copy to avoid + // duplicating all of Write's logic here. + return w.Write([]byte(s)) +} + +func (w *textWriter) Write(p []byte) (n int, err error) { + newlines := bytes.Count(p, newline) + if newlines == 0 { + if !w.compact && w.complete { + w.writeIndent() + } + n, err = w.w.Write(p) + w.complete = false + return n, err + } + + frags := bytes.SplitN(p, newline, newlines+1) + if w.compact { + for i, frag := range frags { + if i > 0 { + if err := w.w.WriteByte(' '); err != nil { + return n, err + } + n++ + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + } + return n, nil + } + + for i, frag := range frags { + if w.complete { + w.writeIndent() + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + if i+1 < len(frags) { + if err := w.w.WriteByte('\n'); err != nil { + return n, err + } + n++ + } + } + w.complete = len(frags[len(frags)-1]) == 0 + return n, nil +} + +func (w *textWriter) WriteByte(c byte) error { + if w.compact && c == '\n' { + c = ' ' + } + if !w.compact && w.complete { + w.writeIndent() + } + err := w.w.WriteByte(c) + w.complete = c == '\n' + return err +} + +func (w *textWriter) indent() { w.ind++ } + +func (w *textWriter) unindent() { + if w.ind == 0 { + log.Print("proto: textWriter unindented too far") + return + } + w.ind-- +} + +func writeName(w *textWriter, props *Properties) error { + if _, err := w.WriteString(props.OrigName); err != nil { + return err + } + if props.Wire != "group" { + return w.WriteByte(':') + } + return nil +} + +// raw is the interface satisfied by RawMessage. +type raw interface { + Bytes() []byte +} + +func requiresQuotes(u string) bool { + // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. + for _, ch := range u { + switch { + case ch == '.' 
|| ch == '/' || ch == '_': + continue + case '0' <= ch && ch <= '9': + continue + case 'A' <= ch && ch <= 'Z': + continue + case 'a' <= ch && ch <= 'z': + continue + default: + return true + } + } + return false +} + +// isAny reports whether sv is a google.protobuf.Any message +func isAny(sv reflect.Value) bool { + type wkt interface { + XXX_WellKnownType() string + } + t, ok := sv.Addr().Interface().(wkt) + return ok && t.XXX_WellKnownType() == "Any" +} + +// writeProto3Any writes an expanded google.protobuf.Any message. +// +// It returns (false, nil) if sv value can't be unmarshaled (e.g. because +// required messages are not linked in). +// +// It returns (true, error) when sv was written in expanded format or an error +// was encountered. +func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) { + turl := sv.FieldByName("TypeUrl") + val := sv.FieldByName("Value") + if !turl.IsValid() || !val.IsValid() { + return true, errors.New("proto: invalid google.protobuf.Any message") + } + + b, ok := val.Interface().([]byte) + if !ok { + return true, errors.New("proto: invalid google.protobuf.Any message") + } + + parts := strings.Split(turl.String(), "/") + mt := MessageType(parts[len(parts)-1]) + if mt == nil { + return false, nil + } + m := reflect.New(mt.Elem()) + if err := Unmarshal(b, m.Interface().(Message)); err != nil { + return false, nil + } + w.Write([]byte("[")) + u := turl.String() + if requiresQuotes(u) { + writeString(w, u) + } else { + w.Write([]byte(u)) + } + if w.compact { + w.Write([]byte("]:<")) + } else { + w.Write([]byte("]: <\n")) + w.ind++ + } + if err := tm.writeStruct(w, m.Elem()); err != nil { + return true, err + } + if w.compact { + w.Write([]byte("> ")) + } else { + w.ind-- + w.Write([]byte(">\n")) + } + return true, nil +} + +func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { + if tm.ExpandAny && isAny(sv) { + if canExpand, err := tm.writeProto3Any(w, sv); canExpand { + return err + } + } + st := sv.Type() + sprops := GetProperties(st) + for i := 0; i < sv.NumField(); i++ { + fv := sv.Field(i) + props := sprops.Prop[i] + name := st.Field(i).Name + + if strings.HasPrefix(name, "XXX_") { + // There are two XXX_ fields: + // XXX_unrecognized []byte + // XXX_extensions map[int32]proto.Extension + // The first is handled here; + // the second is handled at the bottom of this function. + if name == "XXX_unrecognized" && !fv.IsNil() { + if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { + return err + } + } + continue + } + if fv.Kind() == reflect.Ptr && fv.IsNil() { + // Field not filled in. This could be an optional field or + // a required field that wasn't filled in. Either way, there + // isn't anything we can show for it. + continue + } + if fv.Kind() == reflect.Slice && fv.IsNil() { + // Repeated field that is empty, or a bytes field that is unused. + continue + } + + if props.Repeated && fv.Kind() == reflect.Slice { + // Repeated field. + for j := 0; j < fv.Len(); j++ { + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + v := fv.Index(j) + if v.Kind() == reflect.Ptr && v.IsNil() { + // A nil message in a repeated field is not valid, + // but we can handle that more gracefully than panicking. 
+ if _, err := w.Write([]byte("\n")); err != nil { + return err + } + continue + } + if len(props.Enum) > 0 { + if err := tm.writeEnum(w, v, props); err != nil { + return err + } + } else if err := tm.writeAny(w, v, props); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + continue + } + if fv.Kind() == reflect.Map { + // Map fields are rendered as a repeated struct with key/value fields. + keys := fv.MapKeys() + sort.Sort(mapKeys(keys)) + for _, key := range keys { + val := fv.MapIndex(key) + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + // open struct + if err := w.WriteByte('<'); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + // key + if _, err := w.WriteString("key:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, key, props.mkeyprop); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + // nil values aren't legal, but we can avoid panicking because of them. + if val.Kind() != reflect.Ptr || !val.IsNil() { + // value + if _, err := w.WriteString("value:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, val, props.mvalprop); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + // close struct + w.unindent() + if err := w.WriteByte('>'); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + continue + } + if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { + // empty bytes field + continue + } + if props.proto3 && fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { + // proto3 non-repeated scalar field; skip if zero value + if isProto3Zero(fv) { + continue + } + } + + if fv.Kind() == reflect.Interface { + // Check if it is a oneof. + if st.Field(i).Tag.Get("protobuf_oneof") != "" { + // fv is nil, or holds a pointer to generated struct. + // That generated struct has exactly one field, + // which has a protobuf struct tag. + if fv.IsNil() { + continue + } + inner := fv.Elem().Elem() // interface -> *T -> T + tag := inner.Type().Field(0).Tag.Get("protobuf") + props = new(Properties) // Overwrite the outer props var, but not its pointee. + props.Parse(tag) + // Write the value in the oneof, not the oneof itself. + fv = inner.Field(0) + + // Special case to cope with malformed messages gracefully: + // If the value in the oneof is a nil pointer, don't panic + // in writeAny. + if fv.Kind() == reflect.Ptr && fv.IsNil() { + // Use errors.New so writeAny won't render quotes. + msg := errors.New("/* nil */") + fv = reflect.ValueOf(&msg).Elem() + } + } + } + + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if b, ok := fv.Interface().(raw); ok { + if err := writeRaw(w, b.Bytes()); err != nil { + return err + } + continue + } + + if len(props.Enum) > 0 { + if err := tm.writeEnum(w, fv, props); err != nil { + return err + } + } else if err := tm.writeAny(w, fv, props); err != nil { + return err + } + + if err := w.WriteByte('\n'); err != nil { + return err + } + } + + // Extensions (the XXX_extensions field). 
+ pv := sv + if pv.CanAddr() { + pv = sv.Addr() + } else { + pv = reflect.New(sv.Type()) + pv.Elem().Set(sv) + } + if pv.Type().Implements(extensionRangeType) { + if err := tm.writeExtensions(w, pv); err != nil { + return err + } + } + + return nil +} + +// writeRaw writes an uninterpreted raw message. +func writeRaw(w *textWriter, b []byte) error { + if err := w.WriteByte('<'); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + if err := writeUnknownStruct(w, b); err != nil { + return err + } + w.unindent() + if err := w.WriteByte('>'); err != nil { + return err + } + return nil +} + +// writeAny writes an arbitrary field. +func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { + v = reflect.Indirect(v) + + if props != nil { + if len(props.CustomType) > 0 { + custom, ok := v.Interface().(Marshaler) + if ok { + data, err := custom.Marshal() + if err != nil { + return err + } + if err := writeString(w, string(data)); err != nil { + return err + } + return nil + } + } else if props.StdTime { + t, ok := v.Interface().(time.Time) + if !ok { + return fmt.Errorf("stdtime is not time.Time, but %T", v.Interface()) + } + tproto, err := timestampProto(t) + if err != nil { + return err + } + propsCopy := *props // Make a copy so that this is goroutine-safe + propsCopy.StdTime = false + err = tm.writeAny(w, reflect.ValueOf(tproto), &propsCopy) + return err + } else if props.StdDuration { + d, ok := v.Interface().(time.Duration) + if !ok { + return fmt.Errorf("stdtime is not time.Duration, but %T", v.Interface()) + } + dproto := durationProto(d) + propsCopy := *props // Make a copy so that this is goroutine-safe + propsCopy.StdDuration = false + err := tm.writeAny(w, reflect.ValueOf(dproto), &propsCopy) + return err + } + } + + // Floats have special cases. + if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { + x := v.Float() + var b []byte + switch { + case math.IsInf(x, 1): + b = posInf + case math.IsInf(x, -1): + b = negInf + case math.IsNaN(x): + b = nan + } + if b != nil { + _, err := w.Write(b) + return err + } + // Other values are handled below. + } + + // We don't attempt to serialise every possible value type; only those + // that can occur in protocol buffers. + switch v.Kind() { + case reflect.Slice: + // Should only be a []byte; repeated fields are handled in writeStruct. + if err := writeString(w, string(v.Bytes())); err != nil { + return err + } + case reflect.String: + if err := writeString(w, v.String()); err != nil { + return err + } + case reflect.Struct: + // Required/optional group/message. + var bra, ket byte = '<', '>' + if props != nil && props.Wire == "group" { + bra, ket = '{', '}' + } + if err := w.WriteByte(bra); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + if etm, ok := v.Interface().(encoding.TextMarshaler); ok { + text, err := etm.MarshalText() + if err != nil { + return err + } + if _, err = w.Write(text); err != nil { + return err + } + } else if err := tm.writeStruct(w, v); err != nil { + return err + } + w.unindent() + if err := w.WriteByte(ket); err != nil { + return err + } + default: + _, err := fmt.Fprint(w, v.Interface()) + return err + } + return nil +} + +// equivalent to C's isprint. +func isprint(c byte) bool { + return c >= 0x20 && c < 0x7f +} + +// writeString writes a string in the protocol buffer text format. 
+// It is similar to strconv.Quote except we don't use Go escape sequences, +// we treat the string as a byte sequence, and we use octal escapes. +// These differences are to maintain interoperability with the other +// languages' implementations of the text format. +func writeString(w *textWriter, s string) error { + // use WriteByte here to get any needed indent + if err := w.WriteByte('"'); err != nil { + return err + } + // Loop over the bytes, not the runes. + for i := 0; i < len(s); i++ { + var err error + // Divergence from C++: we don't escape apostrophes. + // There's no need to escape them, and the C++ parser + // copes with a naked apostrophe. + switch c := s[i]; c { + case '\n': + _, err = w.w.Write(backslashN) + case '\r': + _, err = w.w.Write(backslashR) + case '\t': + _, err = w.w.Write(backslashT) + case '"': + _, err = w.w.Write(backslashDQ) + case '\\': + _, err = w.w.Write(backslashBS) + default: + if isprint(c) { + err = w.w.WriteByte(c) + } else { + _, err = fmt.Fprintf(w.w, "\\%03o", c) + } + } + if err != nil { + return err + } + } + return w.WriteByte('"') +} + +func writeUnknownStruct(w *textWriter, data []byte) (err error) { + if !w.compact { + if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { + return err + } + } + b := NewBuffer(data) + for b.index < len(b.buf) { + x, err := b.DecodeVarint() + if err != nil { + _, ferr := fmt.Fprintf(w, "/* %v */\n", err) + return ferr + } + wire, tag := x&7, x>>3 + if wire == WireEndGroup { + w.unindent() + if _, werr := w.Write(endBraceNewline); werr != nil { + return werr + } + continue + } + if _, ferr := fmt.Fprint(w, tag); ferr != nil { + return ferr + } + if wire != WireStartGroup { + if err = w.WriteByte(':'); err != nil { + return err + } + } + if !w.compact || wire == WireStartGroup { + if err = w.WriteByte(' '); err != nil { + return err + } + } + switch wire { + case WireBytes: + buf, e := b.DecodeRawBytes(false) + if e == nil { + _, err = fmt.Fprintf(w, "%q", buf) + } else { + _, err = fmt.Fprintf(w, "/* %v */", e) + } + case WireFixed32: + x, err = b.DecodeFixed32() + err = writeUnknownInt(w, x, err) + case WireFixed64: + x, err = b.DecodeFixed64() + err = writeUnknownInt(w, x, err) + case WireStartGroup: + err = w.WriteByte('{') + w.indent() + case WireVarint: + x, err = b.DecodeVarint() + err = writeUnknownInt(w, x, err) + default: + _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) + } + if err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + return nil +} + +func writeUnknownInt(w *textWriter, x uint64, err error) error { + if err == nil { + _, err = fmt.Fprint(w, x) + } else { + _, err = fmt.Fprintf(w, "/* %v */", err) + } + return err +} + +type int32Slice []int32 + +func (s int32Slice) Len() int { return len(s) } +func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } +func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// writeExtensions writes all the extensions in pv. +// pv is assumed to be a pointer to a protocol message struct that is extendable. 
+func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error { + emap := extensionMaps[pv.Type().Elem()] + e := pv.Interface().(Message) + + var m map[int32]Extension + var mu sync.Locker + if em, ok := e.(extensionsBytes); ok { + eb := em.GetExtensions() + var err error + m, err = BytesToExtensionsMap(*eb) + if err != nil { + return err + } + mu = notLocker{} + } else if _, ok := e.(extendableProto); ok { + ep, _ := extendable(e) + m, mu = ep.extensionsRead() + if m == nil { + return nil + } + } + + // Order the extensions by ID. + // This isn't strictly necessary, but it will give us + // canonical output, which will also make testing easier. + + mu.Lock() + ids := make([]int32, 0, len(m)) + for id := range m { + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) + mu.Unlock() + + for _, extNum := range ids { + ext := m[extNum] + var desc *ExtensionDesc + if emap != nil { + desc = emap[extNum] + } + if desc == nil { + // Unknown extension. + if err := writeUnknownStruct(w, ext.enc); err != nil { + return err + } + continue + } + + pb, err := GetExtension(e, desc) + if err != nil { + return fmt.Errorf("failed getting extension: %v", err) + } + + // Repeated extensions will appear as a slice. + if !desc.repeated() { + if err := tm.writeExtension(w, desc.Name, pb); err != nil { + return err + } + } else { + v := reflect.ValueOf(pb) + for i := 0; i < v.Len(); i++ { + if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { + return err + } + } + } + } + return nil +} + +func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error { + if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + return nil +} + +func (w *textWriter) writeIndent() { + if !w.complete { + return + } + remain := w.ind * 2 + for remain > 0 { + n := remain + if n > len(spaces) { + n = len(spaces) + } + w.w.Write(spaces[:n]) + remain -= n + } + w.complete = false +} + +// TextMarshaler is a configurable text format marshaler. +type TextMarshaler struct { + Compact bool // use compact text format (one line). + ExpandAny bool // expand google.protobuf.Any messages of known types +} + +// Marshal writes a given protocol buffer in text format. +// The only errors returned are from w. +func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error { + val := reflect.ValueOf(pb) + if pb == nil || val.IsNil() { + w.Write([]byte("")) + return nil + } + var bw *bufio.Writer + ww, ok := w.(writer) + if !ok { + bw = bufio.NewWriter(w) + ww = bw + } + aw := &textWriter{ + w: ww, + complete: true, + compact: tm.Compact, + } + + if etm, ok := pb.(encoding.TextMarshaler); ok { + text, err := etm.MarshalText() + if err != nil { + return err + } + if _, err = aw.Write(text); err != nil { + return err + } + if bw != nil { + return bw.Flush() + } + return nil + } + // Dereference the received pointer so we don't have outer < and >. + v := reflect.Indirect(val) + if err := tm.writeStruct(aw, v); err != nil { + return err + } + if bw != nil { + return bw.Flush() + } + return nil +} + +// Text is the same as Marshal, but returns the string directly. 
+func (tm *TextMarshaler) Text(pb Message) string { + var buf bytes.Buffer + tm.Marshal(&buf, pb) + return buf.String() +} + +var ( + defaultTextMarshaler = TextMarshaler{} + compactTextMarshaler = TextMarshaler{Compact: true} +) + +// TODO: consider removing some of the Marshal functions below. + +// MarshalText writes a given protocol buffer in text format. +// The only errors returned are from w. +func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) } + +// MarshalTextString is the same as MarshalText, but returns the string directly. +func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) } + +// CompactText writes a given protocol buffer in compact text format (one line). +func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) } + +// CompactTextString is the same as CompactText, but returns the string directly. +func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) } diff --git a/vendor/github.com/gogo/protobuf/proto/text_gogo.go b/vendor/github.com/gogo/protobuf/proto/text_gogo.go new file mode 100644 index 0000000..1d6c6aa --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/text_gogo.go @@ -0,0 +1,57 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
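The text.go writer above is exposed through MarshalText, MarshalTextString, CompactText, and CompactTextString. A hedged usage sketch follows; the Greeting type is a hand-tagged stand-in for what protoc-gen-gogo would normally generate, so treat it as an assumption rather than the project's actual message set:

    package main

    import (
    	"fmt"

    	"github.com/gogo/protobuf/proto"
    )

    // Greeting is a toy message with hand-written protobuf tags; real
    // code would use a generated type instead.
    type Greeting struct {
    	Name  string `protobuf:"bytes,1,opt,name=name"`
    	Count int32  `protobuf:"varint,2,opt,name=count"`
    }

    func (m *Greeting) Reset()         { *m = Greeting{} }
    func (m *Greeting) String() string { return proto.CompactTextString(m) }
    func (*Greeting) ProtoMessage()    {}

    func main() {
    	g := &Greeting{Name: "nodeadm", Count: 2}
    	fmt.Print(proto.MarshalTextString(g))   // multi-line: name: "nodeadm" / count: 2
    	fmt.Println(proto.CompactTextString(g)) // one line:   name:"nodeadm" count:2
    }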
+
+package proto
+
+import (
+	"fmt"
+	"reflect"
+)
+
+func (tm *TextMarshaler) writeEnum(w *textWriter, v reflect.Value, props *Properties) error {
+	m, ok := enumStringMaps[props.Enum]
+	if !ok {
+		if err := tm.writeAny(w, v, props); err != nil {
+			return err
+		}
+	}
+	key := int32(0)
+	if v.Kind() == reflect.Ptr {
+		key = int32(v.Elem().Int())
+	} else {
+		key = int32(v.Int())
+	}
+	s, ok := m[key]
+	if !ok {
+		if err := tm.writeAny(w, v, props); err != nil {
+			return err
+		}
+	}
+	_, err := fmt.Fprint(w, s)
+	return err
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/text_parser.go b/vendor/github.com/gogo/protobuf/proto/text_parser.go
new file mode 100644
index 0000000..f127672
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/text_parser.go
@@ -0,0 +1,1013 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for parsing the Text protocol buffer format.
+// TODO: message sets.
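text_parser.go, which starts here, is the inverse of text.go: it tokenizes the text format and fills in a message through reflection. Reusing the hand-tagged toy Greeting type from the previous sketch (again an assumption, not a generated type), a round trip looks roughly like this:

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/gogo/protobuf/proto"
    )

    // Greeting is the same hand-tagged toy type as in the earlier sketch.
    type Greeting struct {
    	Name  string `protobuf:"bytes,1,opt,name=name"`
    	Count int32  `protobuf:"varint,2,opt,name=count"`
    }

    func (m *Greeting) Reset()         { *m = Greeting{} }
    func (m *Greeting) String() string { return proto.CompactTextString(m) }
    func (*Greeting) ProtoMessage()    {}

    func main() {
    	var g Greeting
    	// UnmarshalText resets g, then parses "name: value" pairs into it.
    	if err := proto.UnmarshalText(`name: "cached" count: 7`, &g); err != nil {
    		log.Fatal(err)
    	}
    	fmt.Println(&g) // prints via the compact text form: name:"cached" count:7
    }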
+ +import ( + "encoding" + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +// Error string emitted when deserializing Any and fields are already set +const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set" + +type ParseError struct { + Message string + Line int // 1-based line number + Offset int // 0-based byte offset from start of input +} + +func (p *ParseError) Error() string { + if p.Line == 1 { + // show offset only for first line + return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) + } + return fmt.Sprintf("line %d: %v", p.Line, p.Message) +} + +type token struct { + value string + err *ParseError + line int // line number + offset int // byte number from start of input, not start of line + unquoted string // the unquoted version of value, if it was a quoted string +} + +func (t *token) String() string { + if t.err == nil { + return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) + } + return fmt.Sprintf("parse error: %v", t.err) +} + +type textParser struct { + s string // remaining input + done bool // whether the parsing is finished (success or error) + backed bool // whether back() was called + offset, line int + cur token +} + +func newTextParser(s string) *textParser { + p := new(textParser) + p.s = s + p.line = 1 + p.cur.line = 1 + return p +} + +func (p *textParser) errorf(format string, a ...interface{}) *ParseError { + pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} + p.cur.err = pe + p.done = true + return pe +} + +// Numbers and identifiers are matched by [-+._A-Za-z0-9] +func isIdentOrNumberChar(c byte) bool { + switch { + case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': + return true + case '0' <= c && c <= '9': + return true + } + switch c { + case '-', '+', '.', '_': + return true + } + return false +} + +func isWhitespace(c byte) bool { + switch c { + case ' ', '\t', '\n', '\r': + return true + } + return false +} + +func isQuote(c byte) bool { + switch c { + case '"', '\'': + return true + } + return false +} + +func (p *textParser) skipWhitespace() { + i := 0 + for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { + if p.s[i] == '#' { + // comment; skip to end of line or input + for i < len(p.s) && p.s[i] != '\n' { + i++ + } + if i == len(p.s) { + break + } + } + if p.s[i] == '\n' { + p.line++ + } + i++ + } + p.offset += i + p.s = p.s[i:len(p.s)] + if len(p.s) == 0 { + p.done = true + } +} + +func (p *textParser) advance() { + // Skip whitespace + p.skipWhitespace() + if p.done { + return + } + + // Start of non-whitespace + p.cur.err = nil + p.cur.offset, p.cur.line = p.offset, p.line + p.cur.unquoted = "" + switch p.s[0] { + case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': + // Single symbol + p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] + case '"', '\'': + // Quoted string + i := 1 + for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { + if p.s[i] == '\\' && i+1 < len(p.s) { + // skip escaped char + i++ + } + i++ + } + if i >= len(p.s) || p.s[i] != p.s[0] { + p.errorf("unmatched quote") + return + } + unq, err := unquoteC(p.s[1:i], rune(p.s[0])) + if err != nil { + p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) + return + } + p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] + p.cur.unquoted = unq + default: + i := 0 + for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { + i++ + } + if i == 0 { + p.errorf("unexpected byte %#x", p.s[0]) + return + } + p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] + } + p.offset += 
len(p.cur.value) +} + +var ( + errBadUTF8 = errors.New("proto: bad UTF-8") + errBadHex = errors.New("proto: bad hexadecimal") +) + +func unquoteC(s string, quote rune) (string, error) { + // This is based on C++'s tokenizer.cc. + // Despite its name, this is *not* parsing C syntax. + // For instance, "\0" is an invalid quoted string. + + // Avoid allocation in trivial cases. + simple := true + for _, r := range s { + if r == '\\' || r == quote { + simple = false + break + } + } + if simple { + return s, nil + } + + buf := make([]byte, 0, 3*len(s)/2) + for len(s) > 0 { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", errBadUTF8 + } + s = s[n:] + if r != '\\' { + if r < utf8.RuneSelf { + buf = append(buf, byte(r)) + } else { + buf = append(buf, string(r)...) + } + continue + } + + ch, tail, err := unescape(s) + if err != nil { + return "", err + } + buf = append(buf, ch...) + s = tail + } + return string(buf), nil +} + +func unescape(s string) (ch string, tail string, err error) { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", "", errBadUTF8 + } + s = s[n:] + switch r { + case 'a': + return "\a", s, nil + case 'b': + return "\b", s, nil + case 'f': + return "\f", s, nil + case 'n': + return "\n", s, nil + case 'r': + return "\r", s, nil + case 't': + return "\t", s, nil + case 'v': + return "\v", s, nil + case '?': + return "?", s, nil // trigraph workaround + case '\'', '"', '\\': + return string(r), s, nil + case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X': + if len(s) < 2 { + return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) + } + base := 8 + ss := s[:2] + s = s[2:] + if r == 'x' || r == 'X' { + base = 16 + } else { + ss = string(r) + ss + } + i, err := strconv.ParseUint(ss, base, 8) + if err != nil { + return "", "", err + } + return string([]byte{byte(i)}), s, nil + case 'u', 'U': + n := 4 + if r == 'U' { + n = 8 + } + if len(s) < n { + return "", "", fmt.Errorf(`\%c requires %d digits`, r, n) + } + + bs := make([]byte, n/2) + for i := 0; i < n; i += 2 { + a, ok1 := unhex(s[i]) + b, ok2 := unhex(s[i+1]) + if !ok1 || !ok2 { + return "", "", errBadHex + } + bs[i/2] = a<<4 | b + } + s = s[n:] + return string(bs), s, nil + } + return "", "", fmt.Errorf(`unknown escape \%c`, r) +} + +// Adapted from src/pkg/strconv/quote.go. +func unhex(b byte) (v byte, ok bool) { + switch { + case '0' <= b && b <= '9': + return b - '0', true + case 'a' <= b && b <= 'f': + return b - 'a' + 10, true + case 'A' <= b && b <= 'F': + return b - 'A' + 10, true + } + return 0, false +} + +// Back off the parser by one token. Can only be done between calls to next(). +// It makes the next advance() a no-op. +func (p *textParser) back() { p.backed = true } + +// Advances the parser and returns the new current token. +func (p *textParser) next() *token { + if p.backed || p.done { + p.backed = false + return &p.cur + } + p.advance() + if p.done { + p.cur.value = "" + } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { + // Look for multiple quoted strings separated by whitespace, + // and concatenate them. 
+ cat := p.cur + for { + p.skipWhitespace() + if p.done || !isQuote(p.s[0]) { + break + } + p.advance() + if p.cur.err != nil { + return &p.cur + } + cat.value += " " + p.cur.value + cat.unquoted += p.cur.unquoted + } + p.done = false // parser may have seen EOF, but we want to return cat + p.cur = cat + } + return &p.cur +} + +func (p *textParser) consumeToken(s string) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != s { + p.back() + return p.errorf("expected %q, found %q", s, tok.value) + } + return nil +} + +// Return a RequiredNotSetError indicating which required field was not set. +func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError { + st := sv.Type() + sprops := GetProperties(st) + for i := 0; i < st.NumField(); i++ { + if !isNil(sv.Field(i)) { + continue + } + + props := sprops.Prop[i] + if props.Required { + return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)} + } + } + return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen +} + +// Returns the index in the struct for the named field, as well as the parsed tag properties. +func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) { + i, ok := sprops.decoderOrigNames[name] + if ok { + return i, sprops.Prop[i], true + } + return -1, nil, false +} + +// Consume a ':' from the input stream (if the next token is a colon), +// returning an error if a colon is needed but not present. +func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ":" { + // Colon is optional when the field is a group or message. + needColon := true + switch props.Wire { + case "group": + needColon = false + case "bytes": + // A "bytes" field is either a message, a string, or a repeated field; + // those three become *T, *string and []T respectively, so we can check for + // this field being a pointer to a non-string. + if typ.Kind() == reflect.Ptr { + // *T or *string + if typ.Elem().Kind() == reflect.String { + break + } + } else if typ.Kind() == reflect.Slice { + // []T or []*T + if typ.Elem().Kind() != reflect.Ptr { + break + } + } else if typ.Kind() == reflect.String { + // The proto3 exception is for a string field, + // which requires a colon. + break + } + needColon = false + } + if needColon { + return p.errorf("expected ':', found %q", tok.value) + } + p.back() + } + return nil +} + +func (p *textParser) readStruct(sv reflect.Value, terminator string) error { + st := sv.Type() + sprops := GetProperties(st) + reqCount := sprops.reqCount + var reqFieldErr error + fieldSet := make(map[string]bool) + // A struct is a sequence of "name: value", terminated by one of + // '>' or '}', or the end of the input. A name may also be + // "[extension]" or "[type/url]". + // + // The whole struct can also be an expanded Any message, like: + // [type/url] < ... struct contents ... > + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + if tok.value == "[" { + // Looks like an extension or an Any. + // + // TODO: Check whether we need to handle + // namespace rooted names (e.g. ".something.Foo"). + extName, err := p.consumeExtName() + if err != nil { + return err + } + + if s := strings.LastIndex(extName, "/"); s >= 0 { + // If it contains a slash, it's an Any type URL. 
+ messageName := extName[s+1:] + mt := MessageType(messageName) + if mt == nil { + return p.errorf("unrecognized message %q in google.protobuf.Any", messageName) + } + tok = p.next() + if tok.err != nil { + return tok.err + } + // consume an optional colon + if tok.value == ":" { + tok = p.next() + if tok.err != nil { + return tok.err + } + } + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + v := reflect.New(mt.Elem()) + if pe := p.readStruct(v.Elem(), terminator); pe != nil { + return pe + } + b, err := Marshal(v.Interface().(Message)) + if err != nil { + return p.errorf("failed to marshal message of type %q: %v", messageName, err) + } + if fieldSet["type_url"] { + return p.errorf(anyRepeatedlyUnpacked, "type_url") + } + if fieldSet["value"] { + return p.errorf(anyRepeatedlyUnpacked, "value") + } + sv.FieldByName("TypeUrl").SetString(extName) + sv.FieldByName("Value").SetBytes(b) + fieldSet["type_url"] = true + fieldSet["value"] = true + continue + } + + var desc *ExtensionDesc + // This could be faster, but it's functional. + // TODO: Do something smarter than a linear scan. + for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { + if d.Name == extName { + desc = d + break + } + } + if desc == nil { + return p.errorf("unrecognized extension %q", extName) + } + + props := &Properties{} + props.Parse(desc.Tag) + + typ := reflect.TypeOf(desc.ExtensionType) + if err := p.checkForColon(props, typ); err != nil { + return err + } + + rep := desc.repeated() + + // Read the extension structure, and set it in + // the value we're constructing. + var ext reflect.Value + if !rep { + ext = reflect.New(typ).Elem() + } else { + ext = reflect.New(typ.Elem()).Elem() + } + if err := p.readAny(ext, props); err != nil { + if _, ok := err.(*RequiredNotSetError); !ok { + return err + } + reqFieldErr = err + } + ep := sv.Addr().Interface().(Message) + if !rep { + SetExtension(ep, desc, ext.Interface()) + } else { + old, err := GetExtension(ep, desc) + var sl reflect.Value + if err == nil { + sl = reflect.ValueOf(old) // existing slice + } else { + sl = reflect.MakeSlice(typ, 0, 1) + } + sl = reflect.Append(sl, ext) + SetExtension(ep, desc, sl.Interface()) + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + continue + } + + // This is a normal, non-extension field. + name := tok.value + var dst reflect.Value + fi, props, ok := structFieldByName(sprops, name) + if ok { + dst = sv.Field(fi) + } else if oop, ok := sprops.OneofTypes[name]; ok { + // It is a oneof. + props = oop.Prop + nv := reflect.New(oop.Type.Elem()) + dst = nv.Elem().Field(0) + field := sv.Field(oop.Field) + if !field.IsNil() { + return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name) + } + field.Set(nv) + } + if !dst.IsValid() { + return p.errorf("unknown field name %q in %v", name, st) + } + + if dst.Kind() == reflect.Map { + // Consume any colon. + if err := p.checkForColon(props, dst.Type()); err != nil { + return err + } + + // Construct the map if it doesn't already exist. 
+ if dst.IsNil() { + dst.Set(reflect.MakeMap(dst.Type())) + } + key := reflect.New(dst.Type().Key()).Elem() + val := reflect.New(dst.Type().Elem()).Elem() + + // The map entry should be this sequence of tokens: + // < key : KEY value : VALUE > + // However, implementations may omit key or value, and technically + // we should support them in any order. See b/28924776 for a time + // this went wrong. + + tok := p.next() + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + switch tok.value { + case "key": + if err := p.consumeToken(":"); err != nil { + return err + } + if err := p.readAny(key, props.mkeyprop); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + case "value": + if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil { + return err + } + if err := p.readAny(val, props.mvalprop); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + default: + p.back() + return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) + } + } + + dst.SetMapIndex(key, val) + continue + } + + // Check that it's not already set if it's not a repeated field. + if !props.Repeated && fieldSet[name] { + return p.errorf("non-repeated field %q was repeated", name) + } + + if err := p.checkForColon(props, dst.Type()); err != nil { + return err + } + + // Parse into the field. + fieldSet[name] = true + if err := p.readAny(dst, props); err != nil { + if _, ok := err.(*RequiredNotSetError); !ok { + return err + } + reqFieldErr = err + } + if props.Required { + reqCount-- + } + + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + + } + + if reqCount > 0 { + return p.missingRequiredFieldError(sv) + } + return reqFieldErr +} + +// consumeExtName consumes extension name or expanded Any type URL and the +// following ']'. It returns the name or URL consumed. +func (p *textParser) consumeExtName() (string, error) { + tok := p.next() + if tok.err != nil { + return "", tok.err + } + + // If extension name or type url is quoted, it's a single token. + if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { + name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) + if err != nil { + return "", err + } + return name, p.consumeToken("]") + } + + // Consume everything up to "]" + var parts []string + for tok.value != "]" { + parts = append(parts, tok.value) + tok = p.next() + if tok.err != nil { + return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) + } + } + return strings.Join(parts, ""), nil +} + +// consumeOptionalSeparator consumes an optional semicolon or comma. +// It is used in readStruct to provide backward compatibility. 
+func (p *textParser) consumeOptionalSeparator() error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ";" && tok.value != "," { + p.back() + } + return nil +} + +func (p *textParser) readAny(v reflect.Value, props *Properties) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == "" { + return p.errorf("unexpected EOF") + } + if len(props.CustomType) > 0 { + if props.Repeated { + t := reflect.TypeOf(v.Interface()) + if t.Kind() == reflect.Slice { + tc := reflect.TypeOf(new(Marshaler)) + ok := t.Elem().Implements(tc.Elem()) + if ok { + fv := v + flen := fv.Len() + if flen == fv.Cap() { + nav := reflect.MakeSlice(v.Type(), flen, 2*flen+1) + reflect.Copy(nav, fv) + fv.Set(nav) + } + fv.SetLen(flen + 1) + + // Read one. + p.back() + return p.readAny(fv.Index(flen), props) + } + } + } + if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr { + custom := reflect.New(props.ctype.Elem()).Interface().(Unmarshaler) + err := custom.Unmarshal([]byte(tok.unquoted)) + if err != nil { + return p.errorf("%v %v: %v", err, v.Type(), tok.value) + } + v.Set(reflect.ValueOf(custom)) + } else { + custom := reflect.New(reflect.TypeOf(v.Interface())).Interface().(Unmarshaler) + err := custom.Unmarshal([]byte(tok.unquoted)) + if err != nil { + return p.errorf("%v %v: %v", err, v.Type(), tok.value) + } + v.Set(reflect.Indirect(reflect.ValueOf(custom))) + } + return nil + } + if props.StdTime { + fv := v + p.back() + props.StdTime = false + tproto := ×tamp{} + err := p.readAny(reflect.ValueOf(tproto).Elem(), props) + props.StdTime = true + if err != nil { + return err + } + tim, err := timestampFromProto(tproto) + if err != nil { + return err + } + if props.Repeated { + t := reflect.TypeOf(v.Interface()) + if t.Kind() == reflect.Slice { + if t.Elem().Kind() == reflect.Ptr { + ts := fv.Interface().([]*time.Time) + ts = append(ts, &tim) + fv.Set(reflect.ValueOf(ts)) + return nil + } else { + ts := fv.Interface().([]time.Time) + ts = append(ts, tim) + fv.Set(reflect.ValueOf(ts)) + return nil + } + } + } + if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr { + v.Set(reflect.ValueOf(&tim)) + } else { + v.Set(reflect.Indirect(reflect.ValueOf(&tim))) + } + return nil + } + if props.StdDuration { + fv := v + p.back() + props.StdDuration = false + dproto := &duration{} + err := p.readAny(reflect.ValueOf(dproto).Elem(), props) + props.StdDuration = true + if err != nil { + return err + } + dur, err := durationFromProto(dproto) + if err != nil { + return err + } + if props.Repeated { + t := reflect.TypeOf(v.Interface()) + if t.Kind() == reflect.Slice { + if t.Elem().Kind() == reflect.Ptr { + ds := fv.Interface().([]*time.Duration) + ds = append(ds, &dur) + fv.Set(reflect.ValueOf(ds)) + return nil + } else { + ds := fv.Interface().([]time.Duration) + ds = append(ds, dur) + fv.Set(reflect.ValueOf(ds)) + return nil + } + } + } + if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr { + v.Set(reflect.ValueOf(&dur)) + } else { + v.Set(reflect.Indirect(reflect.ValueOf(&dur))) + } + return nil + } + switch fv := v; fv.Kind() { + case reflect.Slice: + at := v.Type() + if at.Elem().Kind() == reflect.Uint8 { + // Special case for []byte + if tok.value[0] != '"' && tok.value[0] != '\'' { + // Deliberately written out here, as the error after + // this switch statement would write "invalid []byte: ...", + // which is not as user-friendly. 
+ return p.errorf("invalid string: %v", tok.value) + } + bytes := []byte(tok.unquoted) + fv.Set(reflect.ValueOf(bytes)) + return nil + } + // Repeated field. + if tok.value == "[" { + // Repeated field with list notation, like [1,2,3]. + for { + fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) + err := p.readAny(fv.Index(fv.Len()-1), props) + if err != nil { + return err + } + ntok := p.next() + if ntok.err != nil { + return ntok.err + } + if ntok.value == "]" { + break + } + if ntok.value != "," { + return p.errorf("Expected ']' or ',' found %q", ntok.value) + } + } + return nil + } + // One value of the repeated field. + p.back() + fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) + return p.readAny(fv.Index(fv.Len()-1), props) + case reflect.Bool: + // true/1/t/True or false/f/0/False. + switch tok.value { + case "true", "1", "t", "True": + fv.SetBool(true) + return nil + case "false", "0", "f", "False": + fv.SetBool(false) + return nil + } + case reflect.Float32, reflect.Float64: + v := tok.value + // Ignore 'f' for compatibility with output generated by C++, but don't + // remove 'f' when the value is "-inf" or "inf". + if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { + v = v[:len(v)-1] + } + if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { + fv.SetFloat(f) + return nil + } + case reflect.Int32: + if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { + fv.SetInt(x) + return nil + } + + if len(props.Enum) == 0 { + break + } + m, ok := enumValueMaps[props.Enum] + if !ok { + break + } + x, ok := m[tok.value] + if !ok { + break + } + fv.SetInt(int64(x)) + return nil + case reflect.Int64: + if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { + fv.SetInt(x) + return nil + } + + case reflect.Ptr: + // A basic field (indirected through pointer), or a repeated message/group + p.back() + fv.Set(reflect.New(fv.Type().Elem())) + return p.readAny(fv.Elem(), props) + case reflect.String: + if tok.value[0] == '"' || tok.value[0] == '\'' { + fv.SetString(tok.unquoted) + return nil + } + case reflect.Struct: + var terminator string + switch tok.value { + case "{": + terminator = "}" + case "<": + terminator = ">" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + // TODO: Handle nested messages which implement encoding.TextUnmarshaler. + return p.readStruct(fv, terminator) + case reflect.Uint32: + if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { + fv.SetUint(x) + return nil + } + case reflect.Uint64: + if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { + fv.SetUint(x) + return nil + } + } + return p.errorf("invalid %v: %v", v.Type(), tok.value) +} + +// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb +// before starting to unmarshal, so any existing data in pb is always removed. +// If a required field is not set and no other error occurs, +// UnmarshalText returns *RequiredNotSetError. 
+func UnmarshalText(s string, pb Message) error { + if um, ok := pb.(encoding.TextUnmarshaler); ok { + err := um.UnmarshalText([]byte(s)) + return err + } + pb.Reset() + v := reflect.ValueOf(pb) + if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil { + return pe + } + return nil +} diff --git a/vendor/github.com/gogo/protobuf/proto/timestamp.go b/vendor/github.com/gogo/protobuf/proto/timestamp.go new file mode 100644 index 0000000..9324f65 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/timestamp.go @@ -0,0 +1,113 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// This file implements operations on google.protobuf.Timestamp. + +import ( + "errors" + "fmt" + "time" +) + +const ( + // Seconds field of the earliest valid Timestamp. + // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + minValidSeconds = -62135596800 + // Seconds field just after the latest valid Timestamp. + // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + maxValidSeconds = 253402300800 +) + +// validateTimestamp determines whether a Timestamp is valid. +// A valid timestamp represents a time in the range +// [0001-01-01, 10000-01-01) and has a Nanos field +// in the range [0, 1e9). +// +// If the Timestamp is valid, validateTimestamp returns nil. +// Otherwise, it returns an error that describes +// the problem. +// +// Every valid Timestamp can be represented by a time.Time, but the converse is not true. 
+func validateTimestamp(ts *timestamp) error { + if ts == nil { + return errors.New("timestamp: nil Timestamp") + } + if ts.Seconds < minValidSeconds { + return fmt.Errorf("timestamp: %#v before 0001-01-01", ts) + } + if ts.Seconds >= maxValidSeconds { + return fmt.Errorf("timestamp: %#v after 10000-01-01", ts) + } + if ts.Nanos < 0 || ts.Nanos >= 1e9 { + return fmt.Errorf("timestamp: %#v: nanos not in range [0, 1e9)", ts) + } + return nil +} + +// TimestampFromProto converts a google.protobuf.Timestamp proto to a time.Time. +// It returns an error if the argument is invalid. +// +// Unlike most Go functions, if Timestamp returns an error, the first return value +// is not the zero time.Time. Instead, it is the value obtained from the +// time.Unix function when passed the contents of the Timestamp, in the UTC +// locale. This may or may not be a meaningful time; many invalid Timestamps +// do map to valid time.Times. +// +// A nil Timestamp returns an error. The first return value in that case is +// undefined. +func timestampFromProto(ts *timestamp) (time.Time, error) { + // Don't return the zero value on error, because corresponds to a valid + // timestamp. Instead return whatever time.Unix gives us. + var t time.Time + if ts == nil { + t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp + } else { + t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC() + } + return t, validateTimestamp(ts) +} + +// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. +// It returns an error if the resulting Timestamp is invalid. +func timestampProto(t time.Time) (*timestamp, error) { + seconds := t.Unix() + nanos := int32(t.Sub(time.Unix(seconds, 0))) + ts := ×tamp{ + Seconds: seconds, + Nanos: nanos, + } + if err := validateTimestamp(ts); err != nil { + return nil, err + } + return ts, nil +} diff --git a/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go b/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go new file mode 100644 index 0000000..d427647 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go @@ -0,0 +1,229 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2016, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
diff --git a/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go b/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go
new file mode 100644
index 0000000..d427647
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go
@@ -0,0 +1,229 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2016, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"reflect"
+	"time"
+)
+
+var timeType = reflect.TypeOf((*time.Time)(nil)).Elem()
+
+type timestamp struct {
+	Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
+	Nanos   int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
+}
+
+func (m *timestamp) Reset()       { *m = timestamp{} }
+func (*timestamp) ProtoMessage()  {}
+func (*timestamp) String() string { return "timestamp" }
+
+func init() {
+	RegisterType((*timestamp)(nil), "gogo.protobuf.proto.timestamp")
+}
+
+func (o *Buffer) decTimestamp() (time.Time, error) {
+	b, err := o.DecodeRawBytes(true)
+	if err != nil {
+		return time.Time{}, err
+	}
+	tproto := &timestamp{}
+	if err := Unmarshal(b, tproto); err != nil {
+		return time.Time{}, err
+	}
+	return timestampFromProto(tproto)
+}
+
+func (o *Buffer) dec_time(p *Properties, base structPointer) error {
+	t, err := o.decTimestamp()
+	if err != nil {
+		return err
+	}
+	setPtrCustomType(base, p.field, &t)
+	return nil
+}
+
+func (o *Buffer) dec_ref_time(p *Properties, base structPointer) error {
+	t, err := o.decTimestamp()
+	if err != nil {
+		return err
+	}
+	setCustomType(base, p.field, &t)
+	return nil
+}
+
+func (o *Buffer) dec_slice_time(p *Properties, base structPointer) error {
+	t, err := o.decTimestamp()
+	if err != nil {
+		return err
+	}
+	newBas := appendStructPointer(base, p.field, reflect.SliceOf(reflect.PtrTo(timeType)))
+	var zero field
+	setPtrCustomType(newBas, zero, &t)
+	return nil
+}
+
+func (o *Buffer) dec_slice_ref_time(p *Properties, base structPointer) error {
+	t, err := o.decTimestamp()
+	if err != nil {
+		return err
+	}
+	newBas := appendStructPointer(base, p.field, reflect.SliceOf(timeType))
+	var zero field
+	setCustomType(newBas, zero, &t)
+	return nil
+}
+
+func size_time(p *Properties, base structPointer) (n int) {
+	structp := structPointer_GetStructPointer(base, p.field)
+	if structPointer_IsNil(structp) {
+		return 0
+	}
+	tim := structPointer_Interface(structp, timeType).(*time.Time)
+	t, err := timestampProto(*tim)
+	if err != nil {
+		return 0
+	}
+	size := Size(t)
+	return size + sizeVarint(uint64(size)) + len(p.tagcode)
+}
+
+func (o *Buffer) enc_time(p *Properties, base structPointer) error {
+	structp := structPointer_GetStructPointer(base, p.field)
+	if structPointer_IsNil(structp) {
+		return ErrNil
+	}
+	tim := structPointer_Interface(structp, timeType).(*time.Time)
+	t, err := timestampProto(*tim)
+	if err != nil {
+		return err
+	}
+	data, err := Marshal(t)
+	if err != nil {
+		return err
+	}
+	o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(data) + return nil +} + +func size_ref_time(p *Properties, base structPointer) (n int) { + tim := structPointer_InterfaceAt(base, p.field, timeType).(*time.Time) + t, err := timestampProto(*tim) + if err != nil { + return 0 + } + size := Size(t) + return size + sizeVarint(uint64(size)) + len(p.tagcode) +} + +func (o *Buffer) enc_ref_time(p *Properties, base structPointer) error { + tim := structPointer_InterfaceAt(base, p.field, timeType).(*time.Time) + t, err := timestampProto(*tim) + if err != nil { + return err + } + data, err := Marshal(t) + if err != nil { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + return nil +} + +func size_slice_time(p *Properties, base structPointer) (n int) { + ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(timeType))).(*[]*time.Time) + tims := *ptims + for i := 0; i < len(tims); i++ { + if tims[i] == nil { + return 0 + } + tproto, err := timestampProto(*tims[i]) + if err != nil { + return 0 + } + size := Size(tproto) + n += len(p.tagcode) + size + sizeVarint(uint64(size)) + } + return n +} + +func (o *Buffer) enc_slice_time(p *Properties, base structPointer) error { + ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(timeType))).(*[]*time.Time) + tims := *ptims + for i := 0; i < len(tims); i++ { + if tims[i] == nil { + return errRepeatedHasNil + } + tproto, err := timestampProto(*tims[i]) + if err != nil { + return err + } + data, err := Marshal(tproto) + if err != nil { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + } + return nil +} + +func size_slice_ref_time(p *Properties, base structPointer) (n int) { + ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(timeType)).(*[]time.Time) + tims := *ptims + for i := 0; i < len(tims); i++ { + tproto, err := timestampProto(tims[i]) + if err != nil { + return 0 + } + size := Size(tproto) + n += len(p.tagcode) + size + sizeVarint(uint64(size)) + } + return n +} + +func (o *Buffer) enc_slice_ref_time(p *Properties, base structPointer) error { + ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(timeType)).(*[]time.Time) + tims := *ptims + for i := 0; i < len(tims); i++ { + tproto, err := timestampProto(tims[i]) + if err != nil { + return err + } + data, err := Marshal(tproto) + if err != nil { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + } + return nil +} diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/any.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/any.proto new file mode 100644 index 0000000..7eaf229 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/any.proto @@ -0,0 +1,139 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option go_package = "types";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "AnyProto";
+option java_multiple_files = true;
+option objc_class_prefix = "GPB";
+
+// `Any` contains an arbitrary serialized protocol buffer message along with a
+// URL that describes the type of the serialized message.
+//
+// Protobuf library provides support to pack/unpack Any values in the form
+// of utility functions or additional generated methods of the Any type.
+//
+// Example 1: Pack and unpack a message in C++.
+//
+//     Foo foo = ...;
+//     Any any;
+//     any.PackFrom(foo);
+//     ...
+//     if (any.UnpackTo(&foo)) {
+//       ...
+//     }
+//
+// Example 2: Pack and unpack a message in Java.
+//
+//     Foo foo = ...;
+//     Any any = Any.pack(foo);
+//     ...
+//     if (any.is(Foo.class)) {
+//       foo = any.unpack(Foo.class);
+//     }
+//
+// Example 3: Pack and unpack a message in Python.
+//
+//     foo = Foo(...)
+//     any = Any()
+//     any.Pack(foo)
+//     ...
+//     if any.Is(Foo.DESCRIPTOR):
+//       any.Unpack(foo)
+//       ...
+//
+// The pack methods provided by protobuf library will by default use
+// 'type.googleapis.com/full.type.name' as the type URL and the unpack
+// methods only use the fully qualified type name after the last '/'
+// in the type URL, for example "foo.bar.com/x/y.z" will yield type
+// name "y.z".
+//
+//
+// JSON
+// ====
+// The JSON representation of an `Any` value uses the regular
+// representation of the deserialized, embedded message, with an
+// additional field `@type` which contains the type URL. Example:
+//
+//     package google.profile;
+//     message Person {
+//       string first_name = 1;
+//       string last_name = 2;
+//     }
+//
+//     {
+//       "@type": "type.googleapis.com/google.profile.Person",
+//       "firstName": <string>,
+//       "lastName": <string>
+//     }
+//
+// If the embedded message type is well-known and has a custom JSON
+// representation, that representation will be embedded adding a field
+// `value` which holds the custom JSON in addition to the `@type`
+// field. Example (for message [google.protobuf.Duration][]):
+//
+//     {
+//       "@type": "type.googleapis.com/google.protobuf.Duration",
+//       "value": "1.212s"
+//     }
+//
+message Any {
+  // A URL/resource name whose content describes the type of the
+  // serialized protocol buffer message.
+  //
+  // For URLs which use the scheme `http`, `https`, or no scheme, the
+  // following restrictions and interpretations apply:
+  //
+  // * If no scheme is provided, `https` is assumed.
+ // * The last segment of the URL's path must represent the fully + // qualified name of the type (as in `path/google.protobuf.Duration`). + // The name should be in a canonical form (e.g., leading "." is + // not accepted). + // * An HTTP GET on the URL must yield a [google.protobuf.Type][] + // value in binary format, or produce an error. + // * Applications are allowed to cache lookup results based on the + // URL, or have them precompiled into a binary to avoid any + // lookup. Therefore, binary compatibility needs to be preserved + // on changes to types. (Use versioned type names to manage + // breaking changes.) + // + // Schemes other than `http`, `https` (or the empty scheme) might be + // used with implementation specific semantics. + // + string type_url = 1; + + // Must be a valid serialized protocol buffer of the above specified type. + bytes value = 2; +} diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/compiler/plugin.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/compiler/plugin.proto new file mode 100644 index 0000000..f71baa0 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/compiler/plugin.proto @@ -0,0 +1,166 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Author: kenton@google.com (Kenton Varda) +// +// WARNING: The plugin interface is currently EXPERIMENTAL and is subject to +// change. +// +// protoc (aka the Protocol Compiler) can be extended via plugins. A plugin is +// just a program that reads a CodeGeneratorRequest from stdin and writes a +// CodeGeneratorResponse to stdout. +// +// Plugins written using C++ can use google/protobuf/compiler/plugin.h instead +// of dealing with the raw protocol defined here. +// +// A plugin executable needs only to be placed somewhere in the path. 
The +// plugin should be named "protoc-gen-$NAME", and will then be used when the +// flag "--${NAME}_out" is passed to protoc. + +syntax = "proto2"; +package google.protobuf.compiler; +option java_package = "com.google.protobuf.compiler"; +option java_outer_classname = "PluginProtos"; + +option go_package = "plugin_go"; + +import "google/protobuf/descriptor.proto"; + +// The version number of protocol compiler. +message Version { + optional int32 major = 1; + optional int32 minor = 2; + optional int32 patch = 3; + // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should + // be empty for mainline stable releases. + optional string suffix = 4; +} + +// An encoded CodeGeneratorRequest is written to the plugin's stdin. +message CodeGeneratorRequest { + // The .proto files that were explicitly listed on the command-line. The + // code generator should generate code only for these files. Each file's + // descriptor will be included in proto_file, below. + repeated string file_to_generate = 1; + + // The generator parameter passed on the command-line. + optional string parameter = 2; + + // FileDescriptorProtos for all files in files_to_generate and everything + // they import. The files will appear in topological order, so each file + // appears before any file that imports it. + // + // protoc guarantees that all proto_files will be written after + // the fields above, even though this is not technically guaranteed by the + // protobuf wire format. This theoretically could allow a plugin to stream + // in the FileDescriptorProtos and handle them one by one rather than read + // the entire set into memory at once. However, as of this writing, this + // is not similarly optimized on protoc's end -- it will store all fields in + // memory at once before sending them to the plugin. + // + // Type names of fields and extensions in the FileDescriptorProto are always + // fully qualified. + repeated FileDescriptorProto proto_file = 15; + + // The version number of protocol compiler. + optional Version compiler_version = 3; +} + +// The plugin writes an encoded CodeGeneratorResponse to stdout. +message CodeGeneratorResponse { + // Error message. If non-empty, code generation failed. The plugin process + // should exit with status code zero even if it reports an error in this way. + // + // This should be used to indicate errors in .proto files which prevent the + // code generator from generating correct code. Errors which indicate a + // problem in protoc itself -- such as the input CodeGeneratorRequest being + // unparseable -- should be reported by writing a message to stderr and + // exiting with a non-zero status code. + optional string error = 1; + + // Represents a single generated file. + message File { + // The file name, relative to the output directory. The name must not + // contain "." or ".." components and must be relative, not be absolute (so, + // the file cannot lie outside the output directory). "/" must be used as + // the path separator, not "\". + // + // If the name is omitted, the content will be appended to the previous + // file. This allows the generator to break large files into small chunks, + // and allows the generated text to be streamed back to protoc so that large + // files need not reside completely in memory at one time. Note that as of + // this writing protoc does not optimize for this -- it will read the entire + // CodeGeneratorResponse before writing files to disk. 
+    optional string name = 1;
+
+    // If non-empty, indicates that the named file should already exist, and the
+    // content here is to be inserted into that file at a defined insertion
+    // point.  This feature allows a code generator to extend the output
+    // produced by another code generator.  The original generator may provide
+    // insertion points by placing special annotations in the file that look
+    // like:
+    //   @@protoc_insertion_point(NAME)
+    // The annotation can have arbitrary text before and after it on the line,
+    // which allows it to be placed in a comment.  NAME should be replaced with
+    // an identifier naming the point -- this is what other generators will use
+    // as the insertion_point.  Code inserted at this point will be placed
+    // immediately above the line containing the insertion point (thus multiple
+    // insertions to the same point will come out in the order they were added).
+    // The double-@ is intended to make it unlikely that the generated code
+    // could contain things that look like insertion points by accident.
+    //
+    // For example, the C++ code generator places the following line in the
+    // .pb.h files that it generates:
+    //   // @@protoc_insertion_point(namespace_scope)
+    // This line appears within the scope of the file's package namespace, but
+    // outside of any particular class.  Another plugin can then specify the
+    // insertion_point "namespace_scope" to generate additional classes or
+    // other declarations that should be placed in this scope.
+    //
+    // Note that if the line containing the insertion point begins with
+    // whitespace, the same whitespace will be added to every line of the
+    // inserted text.  This is useful for languages like Python, where
+    // indentation matters.  In these languages, the insertion point comment
+    // should be indented the same amount as any inserted code will need to be
+    // in order to work correctly in that context.
+    //
+    // The code generator that generates the initial file and the one which
+    // inserts into it must both run as part of a single invocation of protoc.
+    // Code generators are executed in the order in which they appear on the
+    // command line.
+    //
+    // If |insertion_point| is present, |name| must also be present.
+    optional string insertion_point = 2;
+
+    // The file contents.
+    optional string content = 15;
+  }
+  repeated File file = 15;
+}
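The stdin/stdout contract above is small enough to exercise by hand. A minimal plugin sketch in Go shows the shape of it, assuming the gogo-generated bindings at github.com/gogo/protobuf/protoc-gen-gogo/plugin (package plugin_go); the "echo" behavior and output file naming are invented purely for illustration:

package main

import (
	"io/ioutil"
	"os"

	"github.com/gogo/protobuf/proto"
	plugin_go "github.com/gogo/protobuf/protoc-gen-gogo/plugin"
)

func main() {
	// protoc writes an encoded CodeGeneratorRequest to the plugin's stdin.
	in, err := ioutil.ReadAll(os.Stdin)
	if err != nil {
		os.Exit(1)
	}
	req := &plugin_go.CodeGeneratorRequest{}
	if err := proto.Unmarshal(in, req); err != nil {
		os.Exit(1)
	}

	// Generate one trivial output file per explicitly listed input file.
	resp := &plugin_go.CodeGeneratorResponse{}
	for _, name := range req.FileToGenerate {
		resp.File = append(resp.File, &plugin_go.CodeGeneratorResponse_File{
			Name:    proto.String(name + ".echo.txt"),
			Content: proto.String("saw " + name + "\n"),
		})
	}

	// Errors in the .proto inputs belong in resp.Error; the process still
	// exits with status 0 in that case, per the comment above.
	out, err := proto.Marshal(resp)
	if err != nil {
		os.Exit(1)
	}
	os.Stdout.Write(out)
}

Installed somewhere on PATH as protoc-gen-echo, it would be invoked by protoc --echo_out=. foo.proto.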
diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/descriptor.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/descriptor.proto
new file mode 100644
index 0000000..cc8a002
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/descriptor.proto
@@ -0,0 +1,831 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Author: kenton@google.com (Kenton Varda)
+//  Based on original Protocol Buffers design by
+//  Sanjay Ghemawat, Jeff Dean, and others.
+//
+// The messages in this file describe the definitions found in .proto files.
+// A valid .proto file can be translated directly to a FileDescriptorProto
+// without any other information (e.g. without reading its imports).
+
+
+syntax = "proto2";
+
+package google.protobuf;
+option go_package = "descriptor";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "DescriptorProtos";
+option csharp_namespace = "Google.Protobuf.Reflection";
+option objc_class_prefix = "GPB";
+
+// descriptor.proto must be optimized for speed because reflection-based
+// algorithms don't work during bootstrapping.
+option optimize_for = SPEED;
+
+// The protocol compiler can output a FileDescriptorSet containing the .proto
+// files it parses.
+message FileDescriptorSet {
+  repeated FileDescriptorProto file = 1;
+}
+
+// Describes a complete .proto file.
+message FileDescriptorProto {
+  optional string name = 1;       // file name, relative to root of source tree
+  optional string package = 2;    // e.g. "foo", "foo.bar", etc.
+
+  // Names of files imported by this file.
+  repeated string dependency = 3;
+  // Indexes of the public imported files in the dependency list above.
+  repeated int32 public_dependency = 10;
+  // Indexes of the weak imported files in the dependency list.
+  // For Google-internal migration only. Do not use.
+  repeated int32 weak_dependency = 11;
+
+  // All top-level definitions in this file.
+  repeated DescriptorProto message_type = 4;
+  repeated EnumDescriptorProto enum_type = 5;
+  repeated ServiceDescriptorProto service = 6;
+  repeated FieldDescriptorProto extension = 7;
+
+  optional FileOptions options = 8;
+
+  // This field contains optional information about the original source code.
+  // You may safely remove this entire field without harming runtime
+  // functionality of the descriptors -- the information is needed only by
+  // development tools.
+  optional SourceCodeInfo source_code_info = 9;
+
+  // The syntax of the proto file.
+  // The supported values are "proto2" and "proto3".
+  optional string syntax = 12;
+}
+
+// Describes a message type.
+message DescriptorProto { + optional string name = 1; + + repeated FieldDescriptorProto field = 2; + repeated FieldDescriptorProto extension = 6; + + repeated DescriptorProto nested_type = 3; + repeated EnumDescriptorProto enum_type = 4; + + message ExtensionRange { + optional int32 start = 1; + optional int32 end = 2; + } + repeated ExtensionRange extension_range = 5; + + repeated OneofDescriptorProto oneof_decl = 8; + + optional MessageOptions options = 7; + + // Range of reserved tag numbers. Reserved tag numbers may not be used by + // fields or extension ranges in the same message. Reserved ranges may + // not overlap. + message ReservedRange { + optional int32 start = 1; // Inclusive. + optional int32 end = 2; // Exclusive. + } + repeated ReservedRange reserved_range = 9; + // Reserved field names, which may not be used by fields in the same message. + // A given name may only be reserved once. + repeated string reserved_name = 10; +} + +// Describes a field within a message. +message FieldDescriptorProto { + enum Type { + // 0 is reserved for errors. + // Order is weird for historical reasons. + TYPE_DOUBLE = 1; + TYPE_FLOAT = 2; + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if + // negative values are likely. + TYPE_INT64 = 3; + TYPE_UINT64 = 4; + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if + // negative values are likely. + TYPE_INT32 = 5; + TYPE_FIXED64 = 6; + TYPE_FIXED32 = 7; + TYPE_BOOL = 8; + TYPE_STRING = 9; + // Tag-delimited aggregate. + // Group type is deprecated and not supported in proto3. However, Proto3 + // implementations should still be able to parse the group wire format and + // treat group fields as unknown fields. + TYPE_GROUP = 10; + TYPE_MESSAGE = 11; // Length-delimited aggregate. + + // New in version 2. + TYPE_BYTES = 12; + TYPE_UINT32 = 13; + TYPE_ENUM = 14; + TYPE_SFIXED32 = 15; + TYPE_SFIXED64 = 16; + TYPE_SINT32 = 17; // Uses ZigZag encoding. + TYPE_SINT64 = 18; // Uses ZigZag encoding. + }; + + enum Label { + // 0 is reserved for errors + LABEL_OPTIONAL = 1; + LABEL_REQUIRED = 2; + LABEL_REPEATED = 3; + }; + + optional string name = 1; + optional int32 number = 3; + optional Label label = 4; + + // If type_name is set, this need not be set. If both this and type_name + // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. + optional Type type = 5; + + // For message and enum types, this is the name of the type. If the name + // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping + // rules are used to find the type (i.e. first the nested types within this + // message are searched, then within the parent, on up to the root + // namespace). + optional string type_name = 6; + + // For extensions, this is the name of the type being extended. It is + // resolved in the same manner as type_name. + optional string extendee = 2; + + // For numeric types, contains the original text representation of the value. + // For booleans, "true" or "false". + // For strings, contains the default text contents (not escaped in any way). + // For bytes, contains the C escaped value. All bytes >= 128 are escaped. + // TODO(kenton): Base-64 encode? + optional string default_value = 7; + + // If set, gives the index of a oneof in the containing type's oneof_decl + // list. This field is a member of that oneof. + optional int32 oneof_index = 9; + + // JSON name of this field. The value is set by protocol compiler. 
If the + // user has set a "json_name" option on this field, that option's value + // will be used. Otherwise, it's deduced from the field's name by converting + // it to camelCase. + optional string json_name = 10; + + optional FieldOptions options = 8; +} + +// Describes a oneof. +message OneofDescriptorProto { + optional string name = 1; + optional OneofOptions options = 2; +} + +// Describes an enum type. +message EnumDescriptorProto { + optional string name = 1; + + repeated EnumValueDescriptorProto value = 2; + + optional EnumOptions options = 3; +} + +// Describes a value within an enum. +message EnumValueDescriptorProto { + optional string name = 1; + optional int32 number = 2; + + optional EnumValueOptions options = 3; +} + +// Describes a service. +message ServiceDescriptorProto { + optional string name = 1; + repeated MethodDescriptorProto method = 2; + + optional ServiceOptions options = 3; +} + +// Describes a method of a service. +message MethodDescriptorProto { + optional string name = 1; + + // Input and output type names. These are resolved in the same way as + // FieldDescriptorProto.type_name, but must refer to a message type. + optional string input_type = 2; + optional string output_type = 3; + + optional MethodOptions options = 4; + + // Identifies if client streams multiple client messages + optional bool client_streaming = 5 [default=false]; + // Identifies if server streams multiple server messages + optional bool server_streaming = 6 [default=false]; +} + + +// =================================================================== +// Options + +// Each of the definitions above may have "options" attached. These are +// just annotations which may cause code to be generated slightly differently +// or may contain hints for code that manipulates protocol messages. +// +// Clients may define custom options as extensions of the *Options messages. +// These extensions may not yet be known at parsing time, so the parser cannot +// store the values in them. Instead it stores them in a field in the *Options +// message called uninterpreted_option. This field must have the same name +// across all *Options messages. We then use this field to populate the +// extensions when we build a descriptor, at which point all protos have been +// parsed and so all extensions are known. +// +// Extension numbers for custom options may be chosen as follows: +// * For options which will only be used within a single application or +// organization, or for experimental options, use field numbers 50000 +// through 99999. It is up to you to ensure that you do not use the +// same number for multiple options. +// * For options which will be published and used publicly by multiple +// independent entities, e-mail protobuf-global-extension-registry@google.com +// to reserve extension numbers. Simply provide your project name (e.g. +// Objective-C plugin) and your project website (if available) -- there's no +// need to explain how you intend to use them. Usually you only need one +// extension number. You can declare multiple options with only one extension +// number by putting them in a sub-message. See the Custom Options section of +// the docs for examples: +// https://developers.google.com/protocol-buffers/docs/proto#options +// If this turns out to be popular, a web service will be set up +// to automatically assign option numbers. + + +message FileOptions { + + // Sets the Java package where classes generated from this .proto will be + // placed. 
By default, the proto package is used, but this is often
+  // inappropriate because proto packages do not normally start with backwards
+  // domain names.
+  optional string java_package = 1;
+
+
+  // If set, all the classes from the .proto file are wrapped in a single
+  // outer class with the given name.  This applies to both Proto1
+  // (equivalent to the old "--one_java_file" option) and Proto2 (where
+  // a .proto always translates to a single class, but you may want to
+  // explicitly choose the class name).
+  optional string java_outer_classname = 8;
+
+  // If set true, then the Java code generator will generate a separate .java
+  // file for each top-level message, enum, and service defined in the .proto
+  // file.  Thus, these types will *not* be nested inside the outer class
+  // named by java_outer_classname.  However, the outer class will still be
+  // generated to contain the file's getDescriptor() method as well as any
+  // top-level extensions defined in the file.
+  optional bool java_multiple_files = 10 [default=false];
+
+  // This option does nothing.
+  optional bool java_generate_equals_and_hash = 20 [deprecated=true];
+
+  // If set true, then the Java2 code generator will generate code that
+  // throws an exception whenever an attempt is made to assign a non-UTF-8
+  // byte sequence to a string field.
+  // Message reflection will do the same.
+  // However, an extension field still accepts non-UTF-8 byte sequences.
+  // This option has no effect when used with the lite runtime.
+  optional bool java_string_check_utf8 = 27 [default=false];
+
+
+  // Generated classes can be optimized for speed or code size.
+  enum OptimizeMode {
+    SPEED = 1;        // Generate complete code for parsing, serialization,
+                      // etc.
+    CODE_SIZE = 2;    // Use ReflectionOps to implement these methods.
+    LITE_RUNTIME = 3; // Generate code using MessageLite and the lite runtime.
+  }
+  optional OptimizeMode optimize_for = 9 [default=SPEED];
+
+  // Sets the Go package where structs generated from this .proto will be
+  // placed. If omitted, the Go package will be derived from the following:
+  //   - The basename of the package import path, if provided.
+  //   - Otherwise, the package statement in the .proto file, if present.
+  //   - Otherwise, the basename of the .proto file, without extension.
+  optional string go_package = 11;
+
+
+
+  // Should generic services be generated in each language?  "Generic" services
+  // are not specific to any particular RPC system.  They are generated by the
+  // main code generators in each language (without additional plugins).
+  // Generic services were the only kind of service generation supported by
+  // early versions of google.protobuf.
+  //
+  // Generic services are now considered deprecated in favor of using plugins
+  // that generate code specific to your particular RPC system.  Therefore,
+  // these default to false.  Old code which depends on generic services should
+  // explicitly set them to true.
+  optional bool cc_generic_services = 16 [default=false];
+  optional bool java_generic_services = 17 [default=false];
+  optional bool py_generic_services = 18 [default=false];
+
+  // Is this file deprecated?
+  // Depending on the target platform, this can emit Deprecated annotations
+  // for everything in the file, or it will be completely ignored; in the very
+  // least, this is a formalization for deprecating files.
+  optional bool deprecated = 23 [default=false];
+
+  // Enables the use of arenas for the proto messages in this file.
This applies
+  // only to generated classes for C++.
+  optional bool cc_enable_arenas = 31 [default=false];
+
+
+  // Sets the objective c class prefix which is prepended to all objective c
+  // generated classes from this .proto. There is no default.
+  optional string objc_class_prefix = 36;
+
+  // Namespace for generated classes; defaults to the package.
+  optional string csharp_namespace = 37;
+
+  // By default Swift generators will take the proto package and CamelCase it
+  // replacing '.' with underscore and use that to prefix the types/symbols
+  // defined. When this option is provided, they will use this value instead
+  // to prefix the types/symbols defined.
+  optional string swift_prefix = 39;
+
+  // Sets the php class prefix which is prepended to all php generated classes
+  // from this .proto. Default is empty.
+  optional string php_class_prefix = 40;
+
+  // The parser stores options it doesn't recognize here. See above.
+  repeated UninterpretedOption uninterpreted_option = 999;
+
+  // Clients can define custom options in extensions of this message. See above.
+  extensions 1000 to max;
+
+  //reserved 38;
+}
+
+message MessageOptions {
+  // Set true to use the old proto1 MessageSet wire format for extensions.
+  // This is provided for backwards-compatibility with the MessageSet wire
+  // format.  You should not use this for any other reason:  It's less
+  // efficient, has fewer features, and is more complicated.
+  //
+  // The message must be defined exactly as follows:
+  //   message Foo {
+  //     option message_set_wire_format = true;
+  //     extensions 4 to max;
+  //   }
+  // Note that the message cannot have any defined fields; MessageSets only
+  // have extensions.
+  //
+  // All extensions of your type must be singular messages; e.g. they cannot
+  // be int32s, enums, or repeated messages.
+  //
+  // Because this is an option, the above two restrictions are not enforced by
+  // the protocol compiler.
+  optional bool message_set_wire_format = 1 [default=false];
+
+  // Disables the generation of the standard "descriptor()" accessor, which can
+  // conflict with a field of the same name.  This is meant to make migration
+  // from proto1 easier; new code should avoid fields named "descriptor".
+  optional bool no_standard_descriptor_accessor = 2 [default=false];
+
+  // Is this message deprecated?
+  // Depending on the target platform, this can emit Deprecated annotations
+  // for the message, or it will be completely ignored; in the very least,
+  // this is a formalization for deprecating messages.
+  optional bool deprecated = 3 [default=false];
+
+  // Whether the message is an automatically generated map entry type for the
+  // map field.
+  //
+  // For map fields:
+  //     map<KeyType, ValueType> map_field = 1;
+  // The parsed descriptor looks like:
+  //     message MapFieldEntry {
+  //         option map_entry = true;
+  //         optional KeyType key = 1;
+  //         optional ValueType value = 2;
+  //     }
+  //     repeated MapFieldEntry map_field = 1;
+  //
+  // Implementations may choose not to generate the map_entry=true message, but
+  // use a native map in the target language to hold the keys and values.
+  // The reflection APIs in such implementations still need to work as
+  // if the field is a repeated message field.
+  //
+  // NOTE: Do not set the option in .proto files. Always use the maps syntax
+  // instead. The option should only be implicitly set by the proto compiler
+  // parser.
+  optional bool map_entry = 7;
+
+  //reserved 8;  // javalite_serializable
+  //reserved 9;  // javanano_as_lite
+
+  // The parser stores options it doesn't recognize here. See above.
+  repeated UninterpretedOption uninterpreted_option = 999;
+
+  // Clients can define custom options in extensions of this message. See above.
+  extensions 1000 to max;
+}
+
+message FieldOptions {
+  // The ctype option instructs the C++ code generator to use a different
+  // representation of the field than it normally would.  See the specific
+  // options below.  This option is not yet implemented in the open source
+  // release -- sorry, we'll try to include it in a future version!
+  optional CType ctype = 1 [default = STRING];
+  enum CType {
+    // Default mode.
+    STRING = 0;
+
+    CORD = 1;
+
+    STRING_PIECE = 2;
+  }
+  // The packed option can be enabled for repeated primitive fields to enable
+  // a more efficient representation on the wire. Rather than repeatedly
+  // writing the tag and type for each element, the entire array is encoded as
+  // a single length-delimited blob. In proto3, only explicitly setting it to
+  // false will avoid using packed encoding.
+  optional bool packed = 2;
+
+  // The jstype option determines the JavaScript type used for values of the
+  // field.  The option is permitted only for 64 bit integral and fixed types
+  // (int64, uint64, sint64, fixed64, sfixed64).  By default these types are
+  // represented as JavaScript strings.  This avoids loss of precision that can
+  // happen when a large value is converted to a floating point JavaScript
+  // number.  Specifying JS_NUMBER for the jstype causes the generated
+  // JavaScript code to use the JavaScript "number" type instead of strings.
+  // This option is an enum to permit additional types to be added,
+  // e.g. goog.math.Integer.
+  optional JSType jstype = 6 [default = JS_NORMAL];
+  enum JSType {
+    // Use the default type.
+    JS_NORMAL = 0;
+
+    // Use JavaScript strings.
+    JS_STRING = 1;
+
+    // Use JavaScript numbers.
+    JS_NUMBER = 2;
+  }
+
+  // Should this field be parsed lazily?  Lazy applies only to message-type
+  // fields.  It means that when the outer message is initially parsed, the
+  // inner message's contents will not be parsed but instead stored in encoded
+  // form.  The inner message will actually be parsed when it is first accessed.
+  //
+  // This is only a hint.  Implementations are free to choose whether to use
+  // eager or lazy parsing regardless of the value of this option.  However,
+  // setting this option true suggests that the protocol author believes that
+  // using lazy parsing on this field is worth the additional bookkeeping
+  // overhead typically needed to implement it.
+  //
+  // This option does not affect the public interface of any generated code;
+  // all method signatures remain the same.  Furthermore, thread-safety of the
+  // interface is not affected by this option; const methods remain safe to
+  // call from multiple threads concurrently, while non-const methods continue
+  // to require exclusive access.
+  //
+  //
+  // Note that implementations may choose not to check required fields within
+  // a lazy sub-message.  That is, calling IsInitialized() on the outer message
+  // may return true even if the inner message has missing required fields.
+  // This is necessary because otherwise the inner message would have to be
+  // parsed in order to perform the check, defeating the purpose of lazy
+  // parsing.  An implementation which chooses not to check required fields
+  // must be consistent about it.
That is, for any particular sub-message, the + // implementation must either *always* check its required fields, or *never* + // check its required fields, regardless of whether or not the message has + // been parsed. + optional bool lazy = 5 [default=false]; + + // Is this field deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for accessors, or it will be completely ignored; in the very least, this + // is a formalization for deprecating fields. + optional bool deprecated = 3 [default=false]; + + // For Google-internal migration only. Do not use. + optional bool weak = 10 [default=false]; + + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; + + //reserved 4; // removed jtype +} + +message OneofOptions { + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message EnumOptions { + + // Set this option to true to allow mapping different tag names to the same + // value. + optional bool allow_alias = 2; + + // Is this enum deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum, or it will be completely ignored; in the very least, this + // is a formalization for deprecating enums. + optional bool deprecated = 3 [default=false]; + + //reserved 5; // javanano_as_lite + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message EnumValueOptions { + // Is this enum value deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum value, or it will be completely ignored; in the very least, + // this is a formalization for deprecating enum values. + optional bool deprecated = 1 [default=false]; + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message ServiceOptions { + + // Note: Field numbers 1 through 32 are reserved for Google's internal RPC + // framework. We apologize for hoarding these numbers to ourselves, but + // we were already using them long before we decided to release Protocol + // Buffers. + + // Is this service deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the service, or it will be completely ignored; in the very least, + // this is a formalization for deprecating services. + optional bool deprecated = 33 [default=false]; + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message MethodOptions { + + // Note: Field numbers 1 through 32 are reserved for Google's internal RPC + // framework. We apologize for hoarding these numbers to ourselves, but + // we were already using them long before we decided to release Protocol + // Buffers. 
+
+  // Is this method deprecated?
+  // Depending on the target platform, this can emit Deprecated annotations
+  // for the method, or it will be completely ignored; in the very least,
+  // this is a formalization for deprecating methods.
+  optional bool deprecated = 33 [default=false];
+
+  // Is this method side-effect-free (or safe in HTTP parlance), or idempotent,
+  // or neither? HTTP based RPC implementation may choose GET verb for safe
+  // methods, and PUT verb for idempotent methods instead of the default POST.
+  enum IdempotencyLevel {
+    IDEMPOTENCY_UNKNOWN = 0;
+    NO_SIDE_EFFECTS     = 1; // implies idempotent
+    IDEMPOTENT          = 2; // idempotent, but may have side effects
+  }
+  optional IdempotencyLevel idempotency_level =
+      34 [default=IDEMPOTENCY_UNKNOWN];
+
+  // The parser stores options it doesn't recognize here. See above.
+  repeated UninterpretedOption uninterpreted_option = 999;
+
+  // Clients can define custom options in extensions of this message. See above.
+  extensions 1000 to max;
+}
+
+
+// A message representing an option the parser does not recognize.  This only
+// appears in options protos created by the compiler::Parser class.
+// DescriptorPool resolves these when building Descriptor objects.  Therefore,
+// options protos in descriptor objects (e.g. returned by Descriptor::options(),
+// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions
+// in them.
+message UninterpretedOption {
+  // The name of the uninterpreted option.  Each string represents a segment in
+  // a dot-separated name.  is_extension is true iff a segment represents an
+  // extension (denoted with parentheses in options specs in .proto files).
+  // E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents
+  // "foo.(bar.baz).qux".
+  message NamePart {
+    required string name_part = 1;
+    required bool is_extension = 2;
+  }
+  repeated NamePart name = 2;
+
+  // The value of the uninterpreted option, in whatever type the tokenizer
+  // identified it as during parsing. Exactly one of these should be set.
+  optional string identifier_value = 3;
+  optional uint64 positive_int_value = 4;
+  optional int64 negative_int_value = 5;
+  optional double double_value = 6;
+  optional bytes string_value = 7;
+  optional string aggregate_value = 8;
+}
+
+// ===================================================================
+// Optional source code info
+
+// Encapsulates information about the original source file from which a
+// FileDescriptorProto was generated.
+message SourceCodeInfo {
+  // A Location identifies a piece of source code in a .proto file which
+  // corresponds to a particular definition.  This information is intended
+  // to be useful to IDEs, code indexers, documentation generators, and similar
+  // tools.
+  //
+  // For example, say we have a file like:
+  //   message Foo {
+  //     optional string foo = 1;
+  //   }
+  // Let's look at just the field definition:
+  //   optional string foo = 1;
+  //   ^       ^^     ^^  ^  ^^^
+  //   a       bc     de  f  ghi
+  // We have the following locations:
+  //   span   path               represents
+  //   [a,i)  [ 4, 0, 2, 0 ]     The whole field definition.
+  //   [a,b)  [ 4, 0, 2, 0, 4 ]  The label (optional).
+  //   [c,d)  [ 4, 0, 2, 0, 5 ]  The type (string).
+  //   [e,f)  [ 4, 0, 2, 0, 1 ]  The name (foo).
+  //   [g,h)  [ 4, 0, 2, 0, 3 ]  The number (1).
+  //
+  // Notes:
+  // - A location may refer to a repeated field itself (i.e. not to any
+  //   particular index within it).  This is used whenever a set of elements are
+  //   logically enclosed in a single code segment.
For example, an entire
+//   extend block (possibly containing multiple extension definitions) will
+//   have an outer location whose path refers to the "extensions" repeated
+//   field without an index.
+// - Multiple locations may have the same path.  This happens when a single
+//   logical declaration is spread out across multiple places.  The most
+//   obvious example is the "extend" block again -- there may be multiple
+//   extend blocks in the same scope, each of which will have the same path.
+// - A location's span is not always a subset of its parent's span.  For
+//   example, the "extendee" of an extension declaration appears at the
+//   beginning of the "extend" block and is shared by all extensions within
+//   the block.
+// - Just because a location's span is a subset of some other location's span
+//   does not mean that it is a descendant.  For example, a "group" defines
+//   both a type and a field in a single declaration.  Thus, the locations
+//   corresponding to the type and field and their components will overlap.
+// - Code which tries to interpret locations should probably be designed to
+//   ignore those that it doesn't understand, as more types of locations could
+//   be recorded in the future.
+  repeated Location location = 1;
+  message Location {
+    // Identifies which part of the FileDescriptorProto was defined at this
+    // location.
+    //
+    // Each element is a field number or an index.  They form a path from
+    // the root FileDescriptorProto to the place where the definition occurs.
+    // For example, this path:
+    //   [ 4, 3, 2, 7, 1 ]
+    // refers to:
+    //   file.message_type(3)  // 4, 3
+    //       .field(7)         // 2, 7
+    //       .name()           // 1
+    // This is because FileDescriptorProto.message_type has field number 4:
+    //   repeated DescriptorProto message_type = 4;
+    // and DescriptorProto.field has field number 2:
+    //   repeated FieldDescriptorProto field = 2;
+    // and FieldDescriptorProto.name has field number 1:
+    //   optional string name = 1;
+    //
+    // Thus, the above path gives the location of a field name.  If we removed
+    // the last element:
+    //   [ 4, 3, 2, 7 ]
+    // this path refers to the whole field declaration (from the beginning
+    // of the label to the terminating semicolon).
+    repeated int32 path = 1 [packed=true];
+
+    // Always has exactly three or four elements: start line, start column,
+    // end line (optional, otherwise assumed same as start line), end column.
+    // These are packed into a single field for efficiency.  Note that line
+    // and column numbers are zero-based -- typically you will want to add
+    // 1 to each before displaying to a user.
+    repeated int32 span = 2 [packed=true];
+
+    // If this SourceCodeInfo represents a complete declaration, these are any
+    // comments appearing before and after the declaration which appear to be
+    // attached to the declaration.
+    //
+    // A series of line comments appearing on consecutive lines, with no other
+    // tokens appearing on those lines, will be treated as a single comment.
+    //
+    // leading_detached_comments will keep paragraphs of comments that appear
+    // before (but not connected to) the current element. Each paragraph,
+    // separated by empty lines, will be one comment element in the repeated
+    // field.
+    //
+    // Only the comment content is provided; comment markers (e.g. //) are
+    // stripped out.  For block comments, leading whitespace and an asterisk
+    // will be stripped from the beginning of each line other than the first.
+    // Newlines are included in the output.
+ // + // Examples: + // + // optional int32 foo = 1; // Comment attached to foo. + // // Comment attached to bar. + // optional int32 bar = 2; + // + // optional string baz = 3; + // // Comment attached to baz. + // // Another line attached to baz. + // + // // Comment attached to qux. + // // + // // Another line attached to qux. + // optional double qux = 4; + // + // // Detached comment for corge. This is not leading or trailing comments + // // to qux or corge because there are blank lines separating it from + // // both. + // + // // Detached comment for corge paragraph 2. + // + // optional string corge = 5; + // /* Block comment attached + // * to corge. Leading asterisks + // * will be removed. */ + // /* Block comment attached to + // * grault. */ + // optional int32 grault = 6; + // + // // ignored detached comments. + optional string leading_comments = 3; + optional string trailing_comments = 4; + repeated string leading_detached_comments = 6; + } +} + +// Describes the relationship between generated code and its original source +// file. A GeneratedCodeInfo message is associated with only one generated +// source file, but may contain references to different source .proto files. +message GeneratedCodeInfo { + // An Annotation connects some span of text in generated code to an element + // of its generating .proto file. + repeated Annotation annotation = 1; + message Annotation { + // Identifies the element in the original source .proto file. This field + // is formatted the same as SourceCodeInfo.Location.path. + repeated int32 path = 1 [packed=true]; + + // Identifies the filesystem path to the original source .proto. + optional string source_file = 2; + + // Identifies the starting offset in bytes in the generated code + // that relates to the identified object. + optional int32 begin = 3; + + // Identifies the ending offset in bytes in the generated code that + // relates to the identified offset. The end offset should be one past + // the last relevant byte (so the length of the text = end - begin). + optional int32 end = 4; + } +} diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/duration.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/duration.proto new file mode 100644 index 0000000..8bbaa8b --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/duration.proto @@ -0,0 +1,117 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option cc_enable_arenas = true;
+option go_package = "types";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "DurationProto";
+option java_multiple_files = true;
+option objc_class_prefix = "GPB";
+
+// A Duration represents a signed, fixed-length span of time represented
+// as a count of seconds and fractions of seconds at nanosecond
+// resolution. It is independent of any calendar and concepts like "day"
+// or "month". It is related to Timestamp in that the difference between
+// two Timestamp values is a Duration and it can be added or subtracted
+// from a Timestamp. Range is approximately +-10,000 years.
+//
+// # Examples
+//
+// Example 1: Compute Duration from two Timestamps in pseudo code.
+//
+//     Timestamp start = ...;
+//     Timestamp end = ...;
+//     Duration duration = ...;
+//
+//     duration.seconds = end.seconds - start.seconds;
+//     duration.nanos = end.nanos - start.nanos;
+//
+//     if (duration.seconds < 0 && duration.nanos > 0) {
+//       duration.seconds += 1;
+//       duration.nanos -= 1000000000;
+//     } else if (duration.seconds > 0 && duration.nanos < 0) {
+//       duration.seconds -= 1;
+//       duration.nanos += 1000000000;
+//     }
+//
+// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
+//
+//     Timestamp start = ...;
+//     Duration duration = ...;
+//     Timestamp end = ...;
+//
+//     end.seconds = start.seconds + duration.seconds;
+//     end.nanos = start.nanos + duration.nanos;
+//
+//     if (end.nanos < 0) {
+//       end.seconds -= 1;
+//       end.nanos += 1000000000;
+//     } else if (end.nanos >= 1000000000) {
+//       end.seconds += 1;
+//       end.nanos -= 1000000000;
+//     }
+//
+// Example 3: Compute Duration from datetime.timedelta in Python.
+//
+//     td = datetime.timedelta(days=3, minutes=10)
+//     duration = Duration()
+//     duration.FromTimedelta(td)
+//
+// # JSON Mapping
+//
+// In JSON format, the Duration type is encoded as a string rather than an
+// object, where the string ends in the suffix "s" (indicating seconds) and
+// is preceded by the number of seconds, with nanoseconds expressed as
+// fractional seconds. For example, 3 seconds with 0 nanoseconds should be
+// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
+// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
+// microsecond should be expressed in JSON format as "3.000001s".
+//
+//
+message Duration {
+
+  // Signed seconds of the span of time. Must be from -315,576,000,000
+  // to +315,576,000,000 inclusive. Note: these bounds are computed from:
+  // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
+  int64 seconds = 1;
+
+  // Signed fractions of a second at nanosecond resolution of the span
+  // of time. Durations less than one second are represented with a 0
+  // `seconds` field and a positive or negative `nanos` field.
For durations + // of one second or more, a non-zero value for the `nanos` field must be + // of the same sign as the `seconds` field. Must be from -999,999,999 + // to +999,999,999 inclusive. + int32 nanos = 2; +} diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/empty.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/empty.proto new file mode 100644 index 0000000..6057c85 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/empty.proto @@ -0,0 +1,52 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option go_package = "types"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "EmptyProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; +option cc_enable_arenas = true; + +// A generic empty message that you can re-use to avoid defining duplicated +// empty messages in your APIs. A typical example is to use it as the request +// or the response type of an API method. For instance: +// +// service Foo { +// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); +// } +// +// The JSON representation for `Empty` is empty JSON object `{}`. +message Empty {} diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/field_mask.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/field_mask.proto new file mode 100644 index 0000000..994af79 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/field_mask.proto @@ -0,0 +1,246 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. 
+// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "FieldMaskProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; +option go_package = "types"; + +// `FieldMask` represents a set of symbolic field paths, for example: +// +// paths: "f.a" +// paths: "f.b.d" +// +// Here `f` represents a field in some root message, `a` and `b` +// fields in the message found in `f`, and `d` a field found in the +// message in `f.b`. +// +// Field masks are used to specify a subset of fields that should be +// returned by a get operation or modified by an update operation. +// Field masks also have a custom JSON encoding (see below). +// +// # Field Masks in Projections +// +// When used in the context of a projection, a response message or +// sub-message is filtered by the API to only contain those fields as +// specified in the mask. For example, if the mask in the previous +// example is applied to a response message as follows: +// +// f { +// a : 22 +// b { +// d : 1 +// x : 2 +// } +// y : 13 +// } +// z: 8 +// +// The result will not contain specific values for fields x,y and z +// (their value will be set to the default, and omitted in proto text +// output): +// +// +// f { +// a : 22 +// b { +// d : 1 +// } +// } +// +// A repeated field is not allowed except at the last position of a +// paths string. +// +// If a FieldMask object is not present in a get operation, the +// operation applies to all fields (as if a FieldMask of all fields +// had been specified). +// +// Note that a field mask does not necessarily apply to the +// top-level response message. In case of a REST get operation, the +// field mask applies directly to the response, but in case of a REST +// list operation, the mask instead applies to each individual message +// in the returned resource list. 
In case of a REST custom method, +// other definitions may be used. Where the mask applies will be +// clearly documented together with its declaration in the API. In +// any case, the effect on the returned resource/resources is required +// behavior for APIs. +// +// # Field Masks in Update Operations +// +// A field mask in update operations specifies which fields of the +// targeted resource are going to be updated. The API is required +// to only change the values of the fields as specified in the mask +// and leave the others untouched. If a resource is passed in to +// describe the updated values, the API ignores the values of all +// fields not covered by the mask. +// +// If a repeated field is specified for an update operation, the existing +// repeated values in the target resource will be overwritten by the new values. +// Note that a repeated field is only allowed in the last position of a `paths` +// string. +// +// If a sub-message is specified in the last position of the field mask for an +// update operation, then the existing sub-message in the target resource is +// overwritten. Given the target message: +// +// f { +// b { +// d : 1 +// x : 2 +// } +// c : 1 +// } +// +// And an update message: +// +// f { +// b { +// d : 10 +// } +// } +// +// then if the field mask is: +// +// paths: "f.b" +// +// then the result will be: +// +// f { +// b { +// d : 10 +// } +// c : 1 +// } +// +// However, if the update mask was: +// +// paths: "f.b.d" +// +// then the result would be: +// +// f { +// b { +// d : 10 +// x : 2 +// } +// c : 1 +// } +// +// In order to reset a field's value to the default, the field must +// be in the mask and set to the default value in the provided resource. +// Hence, in order to reset all fields of a resource, provide a default +// instance of the resource and set all fields in the mask, or do +// not provide a mask as described below. +// +// If a field mask is not present on update, the operation applies to +// all fields (as if a field mask of all fields has been specified). +// Note that in the presence of schema evolution, this may mean that +// fields the client does not know and has therefore not filled into +// the request will be reset to their default. If this is unwanted +// behavior, a specific service may require a client to always specify +// a field mask, producing an error if not. +// +// As with get operations, the location of the resource which +// describes the updated values in the request message depends on the +// operation kind. In any case, the effect of the field mask is +// required to be honored by the API. +// +// ## Considerations for HTTP REST +// +// The HTTP kind of an update operation which uses a field mask must +// be set to PATCH instead of PUT in order to satisfy HTTP semantics +// (PUT must only be used for full updates). +// +// # JSON Encoding of Field Masks +// +// In JSON, a field mask is encoded as a single string where paths are +// separated by a comma. Fields name in each path are converted +// to/from lower-camel naming conventions. 
+// +// As an example, consider the following message declarations: +// +// message Profile { +// User user = 1; +// Photo photo = 2; +// } +// message User { +// string display_name = 1; +// string address = 2; +// } +// +// In proto a field mask for `Profile` may look as such: +// +// mask { +// paths: "user.display_name" +// paths: "photo" +// } +// +// In JSON, the same mask is represented as below: +// +// { +// mask: "user.displayName,photo" +// } +// +// # Field Masks and Oneof Fields +// +// Field masks treat fields in oneofs just as regular fields. Consider the +// following message: +// +// message SampleMessage { +// oneof test_oneof { +// string name = 4; +// SubMessage sub_message = 9; +// } +// } +// +// The field mask can be: +// +// mask { +// paths: "name" +// } +// +// Or: +// +// mask { +// paths: "sub_message" +// } +// +// Note that oneof type names ("test_oneof" in this case) cannot be used in +// paths. +message FieldMask { + // The set of field mask paths. + repeated string paths = 1; +} diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/struct.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/struct.proto new file mode 100644 index 0000000..4f78641 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/struct.proto @@ -0,0 +1,96 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option go_package = "types"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "StructProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + + +// `Struct` represents a structured data value, consisting of fields +// which map to dynamically typed values. In some languages, `Struct` +// might be supported by a native representation. 
For example, in
+// scripting languages like JS a struct is represented as an
+// object. The details of that representation are described together
+// with the proto support for the language.
+//
+// The JSON representation for `Struct` is JSON object.
+message Struct {
+  // Unordered map of dynamically typed values.
+  map<string, Value> fields = 1;
+}
+
+// `Value` represents a dynamically typed value which can be either
+// null, a number, a string, a boolean, a recursive struct value, or a
+// list of values. A producer of value is expected to set one of these
+// variants; absence of any variant indicates an error.
+//
+// The JSON representation for `Value` is JSON value.
+message Value {
+  // The kind of value.
+  oneof kind {
+    // Represents a null value.
+    NullValue null_value = 1;
+    // Represents a double value.
+    double number_value = 2;
+    // Represents a string value.
+    string string_value = 3;
+    // Represents a boolean value.
+    bool bool_value = 4;
+    // Represents a structured value.
+    Struct struct_value = 5;
+    // Represents a repeated `Value`.
+    ListValue list_value = 6;
+  }
+}
+
+// `NullValue` is a singleton enumeration to represent the null value for the
+// `Value` type union.
+//
+// The JSON representation for `NullValue` is JSON `null`.
+enum NullValue {
+  // Null value.
+  NULL_VALUE = 0;
+}
+
+// `ListValue` is a wrapper around a repeated field of values.
+//
+// The JSON representation for `ListValue` is JSON array.
+message ListValue {
+  // Repeated field of dynamically typed values.
+  repeated Value values = 1;
+}
diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/timestamp.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/timestamp.proto
new file mode 100644
index 0000000..4ba0b97
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/timestamp.proto
@@ -0,0 +1,133 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option cc_enable_arenas = true;
+option go_package = "types";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "TimestampProto";
+option java_multiple_files = true;
+option objc_class_prefix = "GPB";
+
+// A Timestamp represents a point in time independent of any time zone
+// or calendar, represented as seconds and fractions of seconds at
+// nanosecond resolution in UTC Epoch time. It is encoded using the
+// Proleptic Gregorian Calendar which extends the Gregorian calendar
+// backwards to year one. It is encoded assuming all minutes are 60
+// seconds long, i.e. leap seconds are "smeared" so that no leap second
+// table is needed for interpretation. Range is from
+// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z.
+// By restricting to that range, we ensure that we can convert to
+// and from RFC 3339 date strings.
+// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).
+//
+// # Examples
+//
+// Example 1: Compute Timestamp from POSIX `time()`.
+//
+//     Timestamp timestamp;
+//     timestamp.set_seconds(time(NULL));
+//     timestamp.set_nanos(0);
+//
+// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
+//
+//     struct timeval tv;
+//     gettimeofday(&tv, NULL);
+//
+//     Timestamp timestamp;
+//     timestamp.set_seconds(tv.tv_sec);
+//     timestamp.set_nanos(tv.tv_usec * 1000);
+//
+// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
+//
+//     FILETIME ft;
+//     GetSystemTimeAsFileTime(&ft);
+//     UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
+//
+//     // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
+//     // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
+//     Timestamp timestamp;
+//     timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
+//     timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
+//
+// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
+//
+//     long millis = System.currentTimeMillis();
+//
+//     Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
+//         .setNanos((int) ((millis % 1000) * 1000000)).build();
+//
+//
+// Example 5: Compute Timestamp from current time in Python.
+//
+//     timestamp = Timestamp()
+//     timestamp.GetCurrentTime()
+//
+// # JSON Mapping
+//
+// In JSON format, the Timestamp type is encoded as a string in the
+// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the
+// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z"
+// where {year} is always expressed using four digits while {month}, {day},
+// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
+// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
+// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
+// is required, though only UTC (as indicated by "Z") is presently supported.
+//
+// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
+// 01:30 UTC on January 15, 2017.
+//
+// In JavaScript, one can convert a Date object to this format using the
+// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)
In Python, a standard `datetime.datetime` object can be converted +// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) +// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one +// can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( +// http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime()) +// to obtain a formatter capable of generating timestamps in this format. +// +// +message Timestamp { + + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + int64 seconds = 1; + + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. + int32 nanos = 2; +} diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/wrappers.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/wrappers.proto new file mode 100644 index 0000000..c5632e5 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/wrappers.proto @@ -0,0 +1,118 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Wrappers for primitive (non-message) types. These types are useful +// for embedding primitives in the `google.protobuf.Any` type and for places +// where we need to distinguish between the absence of a primitive +// typed field and its default value. 
+ +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option go_package = "types"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "WrappersProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + +// Wrapper message for `double`. +// +// The JSON representation for `DoubleValue` is JSON number. +message DoubleValue { + // The double value. + double value = 1; +} + +// Wrapper message for `float`. +// +// The JSON representation for `FloatValue` is JSON number. +message FloatValue { + // The float value. + float value = 1; +} + +// Wrapper message for `int64`. +// +// The JSON representation for `Int64Value` is JSON string. +message Int64Value { + // The int64 value. + int64 value = 1; +} + +// Wrapper message for `uint64`. +// +// The JSON representation for `UInt64Value` is JSON string. +message UInt64Value { + // The uint64 value. + uint64 value = 1; +} + +// Wrapper message for `int32`. +// +// The JSON representation for `Int32Value` is JSON number. +message Int32Value { + // The int32 value. + int32 value = 1; +} + +// Wrapper message for `uint32`. +// +// The JSON representation for `UInt32Value` is JSON number. +message UInt32Value { + // The uint32 value. + uint32 value = 1; +} + +// Wrapper message for `bool`. +// +// The JSON representation for `BoolValue` is JSON `true` and `false`. +message BoolValue { + // The bool value. + bool value = 1; +} + +// Wrapper message for `string`. +// +// The JSON representation for `StringValue` is JSON string. +message StringValue { + // The string value. + string value = 1; +} + +// Wrapper message for `bytes`. +// +// The JSON representation for `BytesValue` is JSON string. +message BytesValue { + // The bytes value. + bytes value = 1; +} diff --git a/vendor/github.com/gorilla/context/LICENSE b/vendor/github.com/gorilla/context/LICENSE new file mode 100644 index 0000000..0e5fb87 --- /dev/null +++ b/vendor/github.com/gorilla/context/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 Rodrigo Moraes. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/gorilla/context/README.md b/vendor/github.com/gorilla/context/README.md new file mode 100644 index 0000000..c60a31b --- /dev/null +++ b/vendor/github.com/gorilla/context/README.md @@ -0,0 +1,7 @@ +context +======= +[![Build Status](https://travis-ci.org/gorilla/context.png?branch=master)](https://travis-ci.org/gorilla/context) + +gorilla/context is a general purpose registry for global request variables. + +Read the full documentation here: http://www.gorillatoolkit.org/pkg/context diff --git a/vendor/github.com/gorilla/context/context.go b/vendor/github.com/gorilla/context/context.go new file mode 100644 index 0000000..81cb128 --- /dev/null +++ b/vendor/github.com/gorilla/context/context.go @@ -0,0 +1,143 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package context + +import ( + "net/http" + "sync" + "time" +) + +var ( + mutex sync.RWMutex + data = make(map[*http.Request]map[interface{}]interface{}) + datat = make(map[*http.Request]int64) +) + +// Set stores a value for a given key in a given request. +func Set(r *http.Request, key, val interface{}) { + mutex.Lock() + if data[r] == nil { + data[r] = make(map[interface{}]interface{}) + datat[r] = time.Now().Unix() + } + data[r][key] = val + mutex.Unlock() +} + +// Get returns a value stored for a given key in a given request. +func Get(r *http.Request, key interface{}) interface{} { + mutex.RLock() + if ctx := data[r]; ctx != nil { + value := ctx[key] + mutex.RUnlock() + return value + } + mutex.RUnlock() + return nil +} + +// GetOk returns stored value and presence state like multi-value return of map access. +func GetOk(r *http.Request, key interface{}) (interface{}, bool) { + mutex.RLock() + if _, ok := data[r]; ok { + value, ok := data[r][key] + mutex.RUnlock() + return value, ok + } + mutex.RUnlock() + return nil, false +} + +// GetAll returns all stored values for the request as a map. Nil is returned for invalid requests. +func GetAll(r *http.Request) map[interface{}]interface{} { + mutex.RLock() + if context, ok := data[r]; ok { + result := make(map[interface{}]interface{}, len(context)) + for k, v := range context { + result[k] = v + } + mutex.RUnlock() + return result + } + mutex.RUnlock() + return nil +} + +// GetAllOk returns all stored values for the request as a map and a boolean value that indicates if +// the request was registered. +func GetAllOk(r *http.Request) (map[interface{}]interface{}, bool) { + mutex.RLock() + context, ok := data[r] + result := make(map[interface{}]interface{}, len(context)) + for k, v := range context { + result[k] = v + } + mutex.RUnlock() + return result, ok +} + +// Delete removes a value stored for a given key in a given request. +func Delete(r *http.Request, key interface{}) { + mutex.Lock() + if data[r] != nil { + delete(data[r], key) + } + mutex.Unlock() +} + +// Clear removes all values stored for a given request. 
+//
+// This is usually called by a handler wrapper to clean up request
+// variables at the end of a request lifetime. See ClearHandler().
+func Clear(r *http.Request) {
+	mutex.Lock()
+	clear(r)
+	mutex.Unlock()
+}
+
+// clear is Clear without the lock.
+func clear(r *http.Request) {
+	delete(data, r)
+	delete(datat, r)
+}
+
+// Purge removes request data stored for longer than maxAge, in seconds.
+// It returns the number of requests removed.
+//
+// If maxAge <= 0, all request data is removed.
+//
+// This is only used as a sanity check: if context clearing was not properly
+// set up, some request data can be kept forever, consuming an increasing
+// amount of memory. In case this is detected, Purge() must be called
+// periodically until the problem is fixed.
+func Purge(maxAge int) int {
+	mutex.Lock()
+	count := 0
+	if maxAge <= 0 {
+		count = len(data)
+		data = make(map[*http.Request]map[interface{}]interface{})
+		datat = make(map[*http.Request]int64)
+	} else {
+		min := time.Now().Unix() - int64(maxAge)
+		for r := range data {
+			if datat[r] < min {
+				clear(r)
+				count++
+			}
+		}
+	}
+	mutex.Unlock()
+	return count
+}
+
+// ClearHandler wraps an http.Handler and clears request values at the end
+// of a request lifetime.
+func ClearHandler(h http.Handler) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		defer Clear(r)
+		h.ServeHTTP(w, r)
+	})
+}
diff --git a/vendor/github.com/gorilla/context/doc.go b/vendor/github.com/gorilla/context/doc.go
new file mode 100644
index 0000000..73c7400
--- /dev/null
+++ b/vendor/github.com/gorilla/context/doc.go
@@ -0,0 +1,82 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package context stores values shared during a request lifetime.
+
+For example, a router can set variables extracted from the URL and later
+application handlers can access those values, or it can be used to store
+session values to be saved at the end of a request. There are several
+other common uses.
+
+The idea was posted by Brad Fitzpatrick to the go-nuts mailing list:
+
+	http://groups.google.com/group/golang-nuts/msg/e2d679d303aa5d53
+
+Here's the basic usage: first define the keys that you will need. The key
+type is interface{} so a key can be of any type that supports equality.
+Here we define a key using a custom int type to avoid name collisions:
+
+	package foo
+
+	import (
+		"github.com/gorilla/context"
+	)
+
+	type key int
+
+	const MyKey key = 0
+
+Then set a variable. Variables are bound to an http.Request object, so you
+need a request instance to set a value:
+
+	context.Set(r, MyKey, "bar")
+
+The application can later access the variable using the same key you provided:
+
+	func MyHandler(w http.ResponseWriter, r *http.Request) {
+		// val is "bar".
+		val := context.Get(r, foo.MyKey)
+
+		// returns ("bar", true)
+		val, ok := context.GetOk(r, foo.MyKey)
+		// ...
+	}
+
+And that's all there is to the basic usage. We discuss some other ideas below.
+
+Any type can be stored in the context. To enforce a given type, make the key
+private and wrap Get() and Set() to accept and return values of a specific
+type:
+
+	type key int
+
+	const mykey key = 0
+
+	// GetMyKey returns a value for this package from the request values.
+ func GetMyKey(r *http.Request) SomeType { + if rv := context.Get(r, mykey); rv != nil { + return rv.(SomeType) + } + return nil + } + + // SetMyKey sets a value for this package in the request values. + func SetMyKey(r *http.Request, val SomeType) { + context.Set(r, mykey, val) + } + +Variables must be cleared at the end of a request, to remove all values +that were stored. This can be done in an http.Handler, after a request was +served. Just call Clear() passing the request: + + context.Clear(r) + +...or use ClearHandler(), which conveniently wraps an http.Handler to clear +variables at the end of a request lifetime. + +The Routers from the packages gorilla/mux and gorilla/pat call Clear() +so if you are using either of them you don't need to clear the context manually. +*/ +package context diff --git a/vendor/github.com/gorilla/mux/LICENSE b/vendor/github.com/gorilla/mux/LICENSE new file mode 100644 index 0000000..0e5fb87 --- /dev/null +++ b/vendor/github.com/gorilla/mux/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 Rodrigo Moraes. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/gorilla/mux/README.md b/vendor/github.com/gorilla/mux/README.md new file mode 100644 index 0000000..e60301b --- /dev/null +++ b/vendor/github.com/gorilla/mux/README.md @@ -0,0 +1,7 @@ +mux +=== +[![Build Status](https://travis-ci.org/gorilla/mux.png?branch=master)](https://travis-ci.org/gorilla/mux) + +gorilla/mux is a powerful URL router and dispatcher. + +Read the full documentation here: http://www.gorillatoolkit.org/pkg/mux diff --git a/vendor/github.com/gorilla/mux/doc.go b/vendor/github.com/gorilla/mux/doc.go new file mode 100644 index 0000000..b2deed3 --- /dev/null +++ b/vendor/github.com/gorilla/mux/doc.go @@ -0,0 +1,199 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package gorilla/mux implements a request router and dispatcher. + +The name mux stands for "HTTP request multiplexer". 
Like the standard +http.ServeMux, mux.Router matches incoming requests against a list of +registered routes and calls a handler for the route that matches the URL +or other conditions. The main features are: + + * Requests can be matched based on URL host, path, path prefix, schemes, + header and query values, HTTP methods or using custom matchers. + * URL hosts and paths can have variables with an optional regular + expression. + * Registered URLs can be built, or "reversed", which helps maintaining + references to resources. + * Routes can be used as subrouters: nested routes are only tested if the + parent route matches. This is useful to define groups of routes that + share common conditions like a host, a path prefix or other repeated + attributes. As a bonus, this optimizes request matching. + * It implements the http.Handler interface so it is compatible with the + standard http.ServeMux. + +Let's start registering a couple of URL paths and handlers: + + func main() { + r := mux.NewRouter() + r.HandleFunc("/", HomeHandler) + r.HandleFunc("/products", ProductsHandler) + r.HandleFunc("/articles", ArticlesHandler) + http.Handle("/", r) + } + +Here we register three routes mapping URL paths to handlers. This is +equivalent to how http.HandleFunc() works: if an incoming request URL matches +one of the paths, the corresponding handler is called passing +(http.ResponseWriter, *http.Request) as parameters. + +Paths can have variables. They are defined using the format {name} or +{name:pattern}. If a regular expression pattern is not defined, the matched +variable will be anything until the next slash. For example: + + r := mux.NewRouter() + r.HandleFunc("/products/{key}", ProductHandler) + r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler) + r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) + +The names are used to create a map of route variables which can be retrieved +calling mux.Vars(): + + vars := mux.Vars(request) + category := vars["category"] + +And this is all you need to know about the basic usage. More advanced options +are explained below. + +Routes can also be restricted to a domain or subdomain. Just define a host +pattern to be matched. They can also have variables: + + r := mux.NewRouter() + // Only matches if domain is "www.domain.com". + r.Host("www.domain.com") + // Matches a dynamic subdomain. + r.Host("{subdomain:[a-z]+}.domain.com") + +There are several other matchers that can be added. To match path prefixes: + + r.PathPrefix("/products/") + +...or HTTP methods: + + r.Methods("GET", "POST") + +...or URL schemes: + + r.Schemes("https") + +...or header values: + + r.Headers("X-Requested-With", "XMLHttpRequest") + +...or query values: + + r.Queries("key", "value") + +...or to use a custom matcher function: + + r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool { + return r.ProtoMajor == 0 + }) + +...and finally, it is possible to combine several matchers in a single route: + + r.HandleFunc("/products", ProductsHandler). + Host("www.domain.com"). + Methods("GET"). + Schemes("http") + +Setting the same matching conditions again and again can be boring, so we have +a way to group several routes that share the same requirements. +We call it "subrouting". + +For example, let's say we have several URLs that should only match when the +host is "www.domain.com". 
Create a route for that host and get a "subrouter"
+from it:
+
+	r := mux.NewRouter()
+	s := r.Host("www.domain.com").Subrouter()
+
+Then register routes in the subrouter:
+
+	s.HandleFunc("/products/", ProductsHandler)
+	s.HandleFunc("/products/{key}", ProductHandler)
+	s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
+
+The three URL paths we registered above will only be tested if the domain is
+"www.domain.com", because the subrouter is tested first. This is not
+only convenient, but also optimizes request matching. You can create
+subrouters combining any attribute matchers accepted by a route.
+
+Subrouters can be used to create domain or path "namespaces": you define
+subrouters in a central place and then parts of the app can register their
+paths relative to a given subrouter.
+
+There's one more thing about subroutes. When a subrouter has a path prefix,
+the inner routes use it as base for their paths:
+
+	r := mux.NewRouter()
+	s := r.PathPrefix("/products").Subrouter()
+	// "/products/"
+	s.HandleFunc("/", ProductsHandler)
+	// "/products/{key}/"
+	s.HandleFunc("/{key}/", ProductHandler)
+	// "/products/{key}/details"
+	s.HandleFunc("/{key}/details", ProductDetailsHandler)
+
+Now let's see how to build registered URLs.
+
+Routes can be named. All routes that define a name can have their URLs built,
+or "reversed". We define a name by calling Name() on a route. For example:
+
+	r := mux.NewRouter()
+	r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
+		Name("article")
+
+To build a URL, get the route and call the URL() method, passing a sequence of
+key/value pairs for the route variables. For the previous route, we would do:
+
+	url, err := r.Get("article").URL("category", "technology", "id", "42")
+
+...and the result will be a url.URL with the following path:
+
+	"/articles/technology/42"
+
+This also works for host variables:
+
+	r := mux.NewRouter()
+	r.Host("{subdomain}.domain.com").
+		Path("/articles/{category}/{id:[0-9]+}").
+		HandlerFunc(ArticleHandler).
+		Name("article")
+
+	// url.String() will be "http://news.domain.com/articles/technology/42"
+	url, err := r.Get("article").URL("subdomain", "news",
+		"category", "technology",
+		"id", "42")
+
+All variables defined in the route are required, and their values must
+conform to the corresponding patterns. These requirements guarantee that a
+generated URL will always match a registered route -- the only exception is
+for explicitly defined "build-only" routes which never match.
+
+There's also a way to build only the URL host or path for a route:
+use the methods URLHost() or URLPath() instead. For the previous route,
+we would do:
+
+	// "http://news.domain.com/"
+	host, err := r.Get("article").URLHost("subdomain", "news")
+
+	// "/articles/technology/42"
+	path, err := r.Get("article").URLPath("category", "technology", "id", "42")
+
+And if you use subrouters, host and path defined separately can be built
+as well:
+
+	r := mux.NewRouter()
+	s := r.Host("{subdomain}.domain.com").Subrouter()
+	s.Path("/articles/{category}/{id:[0-9]+}").
+		HandlerFunc(ArticleHandler).
+		Name("article")
+
+	// "http://news.domain.com/articles/technology/42"
+	url, err := r.Get("article").URL("subdomain", "news",
+		"category", "technology",
+		"id", "42")
+*/
+package mux
diff --git a/vendor/github.com/gorilla/mux/mux.go b/vendor/github.com/gorilla/mux/mux.go
new file mode 100644
index 0000000..5b5f8e7
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/mux.go
@@ -0,0 +1,353 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mux
+
+import (
+	"fmt"
+	"net/http"
+	"path"
+
+	"github.com/gorilla/context"
+)
+
+// NewRouter returns a new router instance.
+func NewRouter() *Router {
+	return &Router{namedRoutes: make(map[string]*Route), KeepContext: false}
+}
+
+// Router registers routes to be matched and dispatches a handler.
+//
+// It implements the http.Handler interface, so it can be registered to serve
+// requests:
+//
+//	var router = mux.NewRouter()
+//
+//	func main() {
+//		http.Handle("/", router)
+//	}
+//
+// Or, for Google App Engine, register it in an init() function:
+//
+//	func init() {
+//		http.Handle("/", router)
+//	}
+//
+// This will send all incoming requests to the router.
type Router struct {
+	// Configurable Handler to be used when no route matches.
+	NotFoundHandler http.Handler
+	// Parent route, if this is a subrouter.
+	parent parentRoute
+	// Routes to be matched, in order.
+	routes []*Route
+	// Routes by name for URL building.
+	namedRoutes map[string]*Route
+	// See Router.StrictSlash(). This defines the flag for new routes.
+	strictSlash bool
+	// If true, do not clear the request context after handling the request.
+	KeepContext bool
+}
+
+// Match matches registered routes against the request.
+func (r *Router) Match(req *http.Request, match *RouteMatch) bool {
+	for _, route := range r.routes {
+		if route.Match(req, match) {
+			return true
+		}
+	}
+	return false
+}
+
+// ServeHTTP dispatches the handler registered in the matched route.
+//
+// When there is a match, the route variables can be retrieved calling
+// mux.Vars(request).
+func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+	// Clean path to canonical form and redirect.
+	if p := cleanPath(req.URL.Path); p != req.URL.Path {
+
+		// Added 3 lines (Philip Schlump) - it was dropping the query string
+		// and #fragment from the URL. This matches the fix in Go 1.2 rc 4
+		// for the same problem. Go issue:
+		// http://code.google.com/p/go/issues/detail?id=5252
+		url := *req.URL
+		url.Path = p
+		p = url.String()
+
+		w.Header().Set("Location", p)
+		w.WriteHeader(http.StatusMovedPermanently)
+		return
+	}
+	var match RouteMatch
+	var handler http.Handler
+	if r.Match(req, &match) {
+		handler = match.Handler
+		setVars(req, match.Vars)
+		setCurrentRoute(req, match.Route)
+	}
+	if handler == nil {
+		handler = r.NotFoundHandler
+		if handler == nil {
+			handler = http.NotFoundHandler()
+		}
+	}
+	if !r.KeepContext {
+		defer context.Clear(req)
+	}
+	handler.ServeHTTP(w, req)
+}
+
+// Get returns a route registered with the given name.
+func (r *Router) Get(name string) *Route {
+	return r.getNamedRoutes()[name]
+}
+
+// GetRoute returns a route registered with the given name. This method
+// was renamed to Get() and remains here for backwards compatibility.
+func (r *Router) GetRoute(name string) *Route {
+	return r.getNamedRoutes()[name]
+}
+
+// StrictSlash defines the trailing slash behavior for new routes. The initial
+// value is false.
+// +// When true, if the route path is "/path/", accessing "/path" will redirect +// to the former and vice versa. In other words, your application will always +// see the path as specified in the route. +// +// When false, if the route path is "/path", accessing "/path/" will not match +// this route and vice versa. +// +// Special case: when a route sets a path prefix using the PathPrefix() method, +// strict slash is ignored for that route because the redirect behavior can't +// be determined from a prefix alone. However, any subrouters created from that +// route inherit the original StrictSlash setting. +func (r *Router) StrictSlash(value bool) *Router { + r.strictSlash = value + return r +} + +// ---------------------------------------------------------------------------- +// parentRoute +// ---------------------------------------------------------------------------- + +// getNamedRoutes returns the map where named routes are registered. +func (r *Router) getNamedRoutes() map[string]*Route { + if r.namedRoutes == nil { + if r.parent != nil { + r.namedRoutes = r.parent.getNamedRoutes() + } else { + r.namedRoutes = make(map[string]*Route) + } + } + return r.namedRoutes +} + +// getRegexpGroup returns regexp definitions from the parent route, if any. +func (r *Router) getRegexpGroup() *routeRegexpGroup { + if r.parent != nil { + return r.parent.getRegexpGroup() + } + return nil +} + +// ---------------------------------------------------------------------------- +// Route factories +// ---------------------------------------------------------------------------- + +// NewRoute registers an empty route. +func (r *Router) NewRoute() *Route { + route := &Route{parent: r, strictSlash: r.strictSlash} + r.routes = append(r.routes, route) + return route +} + +// Handle registers a new route with a matcher for the URL path. +// See Route.Path() and Route.Handler(). +func (r *Router) Handle(path string, handler http.Handler) *Route { + return r.NewRoute().Path(path).Handler(handler) +} + +// HandleFunc registers a new route with a matcher for the URL path. +// See Route.Path() and Route.HandlerFunc(). +func (r *Router) HandleFunc(path string, f func(http.ResponseWriter, + *http.Request)) *Route { + return r.NewRoute().Path(path).HandlerFunc(f) +} + +// Headers registers a new route with a matcher for request header values. +// See Route.Headers(). +func (r *Router) Headers(pairs ...string) *Route { + return r.NewRoute().Headers(pairs...) +} + +// Host registers a new route with a matcher for the URL host. +// See Route.Host(). +func (r *Router) Host(tpl string) *Route { + return r.NewRoute().Host(tpl) +} + +// MatcherFunc registers a new route with a custom matcher function. +// See Route.MatcherFunc(). +func (r *Router) MatcherFunc(f MatcherFunc) *Route { + return r.NewRoute().MatcherFunc(f) +} + +// Methods registers a new route with a matcher for HTTP methods. +// See Route.Methods(). +func (r *Router) Methods(methods ...string) *Route { + return r.NewRoute().Methods(methods...) +} + +// Path registers a new route with a matcher for the URL path. +// See Route.Path(). +func (r *Router) Path(tpl string) *Route { + return r.NewRoute().Path(tpl) +} + +// PathPrefix registers a new route with a matcher for the URL path prefix. +// See Route.PathPrefix(). +func (r *Router) PathPrefix(tpl string) *Route { + return r.NewRoute().PathPrefix(tpl) +} + +// Queries registers a new route with a matcher for URL query values. +// See Route.Queries(). 
+func (r *Router) Queries(pairs ...string) *Route { + return r.NewRoute().Queries(pairs...) +} + +// Schemes registers a new route with a matcher for URL schemes. +// See Route.Schemes(). +func (r *Router) Schemes(schemes ...string) *Route { + return r.NewRoute().Schemes(schemes...) +} + +// ---------------------------------------------------------------------------- +// Context +// ---------------------------------------------------------------------------- + +// RouteMatch stores information about a matched route. +type RouteMatch struct { + Route *Route + Handler http.Handler + Vars map[string]string +} + +type contextKey int + +const ( + varsKey contextKey = iota + routeKey +) + +// Vars returns the route variables for the current request, if any. +func Vars(r *http.Request) map[string]string { + if rv := context.Get(r, varsKey); rv != nil { + return rv.(map[string]string) + } + return nil +} + +// CurrentRoute returns the matched route for the current request, if any. +func CurrentRoute(r *http.Request) *Route { + if rv := context.Get(r, routeKey); rv != nil { + return rv.(*Route) + } + return nil +} + +func setVars(r *http.Request, val interface{}) { + context.Set(r, varsKey, val) +} + +func setCurrentRoute(r *http.Request, val interface{}) { + context.Set(r, routeKey, val) +} + +// ---------------------------------------------------------------------------- +// Helpers +// ---------------------------------------------------------------------------- + +// cleanPath returns the canonical path for p, eliminating . and .. elements. +// Borrowed from the net/http package. +func cleanPath(p string) string { + if p == "" { + return "/" + } + if p[0] != '/' { + p = "/" + p + } + np := path.Clean(p) + // path.Clean removes trailing slash except for root; + // put the trailing slash back if necessary. + if p[len(p)-1] == '/' && np != "/" { + np += "/" + } + return np +} + +// uniqueVars returns an error if two slices contain duplicated strings. +func uniqueVars(s1, s2 []string) error { + for _, v1 := range s1 { + for _, v2 := range s2 { + if v1 == v2 { + return fmt.Errorf("mux: duplicated route variable %q", v2) + } + } + } + return nil +} + +// mapFromPairs converts variadic string parameters to a string map. +func mapFromPairs(pairs ...string) (map[string]string, error) { + length := len(pairs) + if length%2 != 0 { + return nil, fmt.Errorf( + "mux: number of parameters must be multiple of 2, got %v", pairs) + } + m := make(map[string]string, length/2) + for i := 0; i < length; i += 2 { + m[pairs[i]] = pairs[i+1] + } + return m, nil +} + +// matchInArray returns true if the given string value is in the array. +func matchInArray(arr []string, value string) bool { + for _, v := range arr { + if v == value { + return true + } + } + return false +} + +// matchMap returns true if the given key/value pairs exist in a given map. +func matchMap(toCheck map[string]string, toMatch map[string][]string, + canonicalKey bool) bool { + for k, v := range toCheck { + // Check if key exists. + if canonicalKey { + k = http.CanonicalHeaderKey(k) + } + if values := toMatch[k]; values == nil { + return false + } else if v != "" { + // If value was defined as an empty string we only check that the + // key exists. Otherwise we also check for equality. 
+ valueExists := false + for _, value := range values { + if v == value { + valueExists = true + break + } + } + if !valueExists { + return false + } + } + } + return true +} diff --git a/vendor/github.com/gorilla/mux/regexp.go b/vendor/github.com/gorilla/mux/regexp.go new file mode 100644 index 0000000..a630548 --- /dev/null +++ b/vendor/github.com/gorilla/mux/regexp.go @@ -0,0 +1,276 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mux + +import ( + "bytes" + "fmt" + "net/http" + "net/url" + "regexp" + "strings" +) + +// newRouteRegexp parses a route template and returns a routeRegexp, +// used to match a host, a path or a query string. +// +// It will extract named variables, assemble a regexp to be matched, create +// a "reverse" template to build URLs and compile regexps to validate variable +// values used in URL building. +// +// Previously we accepted only Python-like identifiers for variable +// names ([a-zA-Z_][a-zA-Z0-9_]*), but currently the only restriction is that +// name and pattern can't be empty, and names can't contain a colon. +func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash bool) (*routeRegexp, error) { + // Check if it is well-formed. + idxs, errBraces := braceIndices(tpl) + if errBraces != nil { + return nil, errBraces + } + // Backup the original. + template := tpl + // Now let's parse it. + defaultPattern := "[^/]+" + if matchQuery { + defaultPattern = "[^?&]+" + matchPrefix = true + } else if matchHost { + defaultPattern = "[^.]+" + matchPrefix = false + } + // Only match strict slash if not matching + if matchPrefix || matchHost || matchQuery { + strictSlash = false + } + // Set a flag for strictSlash. + endSlash := false + if strictSlash && strings.HasSuffix(tpl, "/") { + tpl = tpl[:len(tpl)-1] + endSlash = true + } + varsN := make([]string, len(idxs)/2) + varsR := make([]*regexp.Regexp, len(idxs)/2) + pattern := bytes.NewBufferString("") + if !matchQuery { + pattern.WriteByte('^') + } + reverse := bytes.NewBufferString("") + var end int + var err error + for i := 0; i < len(idxs); i += 2 { + // Set all values we are interested in. + raw := tpl[end:idxs[i]] + end = idxs[i+1] + parts := strings.SplitN(tpl[idxs[i]+1:end-1], ":", 2) + name := parts[0] + patt := defaultPattern + if len(parts) == 2 { + patt = parts[1] + } + // Name or pattern can't be empty. + if name == "" || patt == "" { + return nil, fmt.Errorf("mux: missing name or pattern in %q", + tpl[idxs[i]:end]) + } + // Build the regexp pattern. + fmt.Fprintf(pattern, "%s(%s)", regexp.QuoteMeta(raw), patt) + // Build the reverse template. + fmt.Fprintf(reverse, "%s%%s", raw) + // Append variable name and compiled pattern. + varsN[i/2] = name + varsR[i/2], err = regexp.Compile(fmt.Sprintf("^%s$", patt)) + if err != nil { + return nil, err + } + } + // Add the remaining. + raw := tpl[end:] + pattern.WriteString(regexp.QuoteMeta(raw)) + if strictSlash { + pattern.WriteString("[/]?") + } + if !matchPrefix { + pattern.WriteByte('$') + } + reverse.WriteString(raw) + if endSlash { + reverse.WriteByte('/') + } + // Compile full regexp. + reg, errCompile := regexp.Compile(pattern.String()) + if errCompile != nil { + return nil, errCompile + } + // Done! 
+ return &routeRegexp{ + template: template, + matchHost: matchHost, + matchQuery: matchQuery, + strictSlash: strictSlash, + regexp: reg, + reverse: reverse.String(), + varsN: varsN, + varsR: varsR, + }, nil +} + +// routeRegexp stores a regexp to match a host or path and information to +// collect and validate route variables. +type routeRegexp struct { + // The unmodified template. + template string + // True for host match, false for path or query string match. + matchHost bool + // True for query string match, false for path and host match. + matchQuery bool + // The strictSlash value defined on the route, but disabled if PathPrefix was used. + strictSlash bool + // Expanded regexp. + regexp *regexp.Regexp + // Reverse template. + reverse string + // Variable names. + varsN []string + // Variable regexps (validators). + varsR []*regexp.Regexp +} + +// Match matches the regexp against the URL host or path. +func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool { + if !r.matchHost { + if r.matchQuery { + return r.regexp.MatchString(req.URL.RawQuery) + } else { + return r.regexp.MatchString(req.URL.Path) + } + } + return r.regexp.MatchString(getHost(req)) +} + +// url builds a URL part using the given values. +func (r *routeRegexp) url(pairs ...string) (string, error) { + values, err := mapFromPairs(pairs...) + if err != nil { + return "", err + } + urlValues := make([]interface{}, len(r.varsN)) + for k, v := range r.varsN { + value, ok := values[v] + if !ok { + return "", fmt.Errorf("mux: missing route variable %q", v) + } + urlValues[k] = value + } + rv := fmt.Sprintf(r.reverse, urlValues...) + if !r.regexp.MatchString(rv) { + // The URL is checked against the full regexp, instead of checking + // individual variables. This is faster but to provide a good error + // message, we check individual regexps if the URL doesn't match. + for k, v := range r.varsN { + if !r.varsR[k].MatchString(values[v]) { + return "", fmt.Errorf( + "mux: variable %q doesn't match, expected %q", values[v], + r.varsR[k].String()) + } + } + } + return rv, nil +} + +// braceIndices returns the first level curly brace indices from a string. +// It returns an error in case of unbalanced braces. +func braceIndices(s string) ([]int, error) { + var level, idx int + idxs := make([]int, 0) + for i := 0; i < len(s); i++ { + switch s[i] { + case '{': + if level++; level == 1 { + idx = i + } + case '}': + if level--; level == 0 { + idxs = append(idxs, idx, i+1) + } else if level < 0 { + return nil, fmt.Errorf("mux: unbalanced braces in %q", s) + } + } + } + if level != 0 { + return nil, fmt.Errorf("mux: unbalanced braces in %q", s) + } + return idxs, nil +} + +// ---------------------------------------------------------------------------- +// routeRegexpGroup +// ---------------------------------------------------------------------------- + +// routeRegexpGroup groups the route matchers that carry variables. +type routeRegexpGroup struct { + host *routeRegexp + path *routeRegexp + queries []*routeRegexp +} + +// setMatch extracts the variables from the URL once a route matches. +func (v *routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) { + // Store host variables. + if v.host != nil { + hostVars := v.host.regexp.FindStringSubmatch(getHost(req)) + if hostVars != nil { + for k, v := range v.host.varsN { + m.Vars[v] = hostVars[k+1] + } + } + } + // Store path variables. 
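+	// Note: submatch 0 is the whole match, so variable i maps to group i+1.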
+ if v.path != nil { + pathVars := v.path.regexp.FindStringSubmatch(req.URL.Path) + if pathVars != nil { + for k, v := range v.path.varsN { + m.Vars[v] = pathVars[k+1] + } + // Check if we should redirect. + if v.path.strictSlash { + p1 := strings.HasSuffix(req.URL.Path, "/") + p2 := strings.HasSuffix(v.path.template, "/") + if p1 != p2 { + u, _ := url.Parse(req.URL.String()) + if p1 { + u.Path = u.Path[:len(u.Path)-1] + } else { + u.Path += "/" + } + m.Handler = http.RedirectHandler(u.String(), 301) + } + } + } + } + // Store query string variables. + rawQuery := req.URL.RawQuery + for _, q := range v.queries { + queryVars := q.regexp.FindStringSubmatch(rawQuery) + if queryVars != nil { + for k, v := range q.varsN { + m.Vars[v] = queryVars[k+1] + } + } + } +} + +// getHost tries its best to return the request host. +func getHost(r *http.Request) string { + if r.URL.IsAbs() { + return r.URL.Host + } + host := r.Host + // Slice off any port information. + if i := strings.Index(host, ":"); i != -1 { + host = host[:i] + } + return host + +} diff --git a/vendor/github.com/gorilla/mux/route.go b/vendor/github.com/gorilla/mux/route.go new file mode 100644 index 0000000..c310e66 --- /dev/null +++ b/vendor/github.com/gorilla/mux/route.go @@ -0,0 +1,524 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mux + +import ( + "errors" + "fmt" + "net/http" + "net/url" + "strings" +) + +// Route stores information to match a request and build URLs. +type Route struct { + // Parent where the route was registered (a Router). + parent parentRoute + // Request handler for the route. + handler http.Handler + // List of matchers. + matchers []matcher + // Manager for the variables from host and path. + regexp *routeRegexpGroup + // If true, when the path pattern is "/path/", accessing "/path" will + // redirect to the former and vice versa. + strictSlash bool + // If true, this route never matches: it is only used to build URLs. + buildOnly bool + // The name used to build URLs. + name string + // Error resulted from building a route. + err error +} + +// Match matches the route against the request. +func (r *Route) Match(req *http.Request, match *RouteMatch) bool { + if r.buildOnly || r.err != nil { + return false + } + // Match everything. + for _, m := range r.matchers { + if matched := m.Match(req, match); !matched { + return false + } + } + // Yay, we have a match. Let's collect some info about it. + if match.Route == nil { + match.Route = r + } + if match.Handler == nil { + match.Handler = r.handler + } + if match.Vars == nil { + match.Vars = make(map[string]string) + } + // Set variables. + if r.regexp != nil { + r.regexp.setMatch(req, match, r) + } + return true +} + +// ---------------------------------------------------------------------------- +// Route attributes +// ---------------------------------------------------------------------------- + +// GetError returns an error resulted from building the route, if any. +func (r *Route) GetError() error { + return r.err +} + +// BuildOnly sets the route to never match: it is only used to build URLs. +func (r *Route) BuildOnly() *Route { + r.buildOnly = true + return r +} + +// Handler -------------------------------------------------------------------- + +// Handler sets a handler for the route. 
+func (r *Route) Handler(handler http.Handler) *Route {
+	if r.err == nil {
+		r.handler = handler
+	}
+	return r
+}
+
+// HandlerFunc sets a handler function for the route.
+func (r *Route) HandlerFunc(f func(http.ResponseWriter, *http.Request)) *Route {
+	return r.Handler(http.HandlerFunc(f))
+}
+
+// GetHandler returns the handler for the route, if any.
+func (r *Route) GetHandler() http.Handler {
+	return r.handler
+}
+
+// Name -----------------------------------------------------------------------
+
+// Name sets the name for the route, used to build URLs.
+// If the name was already registered it will be overwritten.
+func (r *Route) Name(name string) *Route {
+	if r.name != "" {
+		r.err = fmt.Errorf("mux: route already has name %q, can't set %q",
+			r.name, name)
+	}
+	if r.err == nil {
+		r.name = name
+		r.getNamedRoutes()[name] = r
+	}
+	return r
+}
+
+// GetName returns the name for the route, if any.
+func (r *Route) GetName() string {
+	return r.name
+}
+
+// ----------------------------------------------------------------------------
+// Matchers
+// ----------------------------------------------------------------------------
+
+// matcher types try to match a request.
+type matcher interface {
+	Match(*http.Request, *RouteMatch) bool
+}
+
+// addMatcher adds a matcher to the route.
+func (r *Route) addMatcher(m matcher) *Route {
+	if r.err == nil {
+		r.matchers = append(r.matchers, m)
+	}
+	return r
+}
+
+// addRegexpMatcher adds a host or path matcher and builder to a route.
+func (r *Route) addRegexpMatcher(tpl string, matchHost, matchPrefix, matchQuery bool) error {
+	if r.err != nil {
+		return r.err
+	}
+	r.regexp = r.getRegexpGroup()
+	if !matchHost && !matchQuery {
+		if len(tpl) == 0 || tpl[0] != '/' {
+			return fmt.Errorf("mux: path must start with a slash, got %q", tpl)
+		}
+		if r.regexp.path != nil {
+			tpl = strings.TrimRight(r.regexp.path.template, "/") + tpl
+		}
+	}
+	rr, err := newRouteRegexp(tpl, matchHost, matchPrefix, matchQuery, r.strictSlash)
+	if err != nil {
+		return err
+	}
+	for _, q := range r.regexp.queries {
+		if err = uniqueVars(rr.varsN, q.varsN); err != nil {
+			return err
+		}
+	}
+	if matchHost {
+		if r.regexp.path != nil {
+			if err = uniqueVars(rr.varsN, r.regexp.path.varsN); err != nil {
+				return err
+			}
+		}
+		r.regexp.host = rr
+	} else {
+		if r.regexp.host != nil {
+			if err = uniqueVars(rr.varsN, r.regexp.host.varsN); err != nil {
+				return err
+			}
+		}
+		if matchQuery {
+			r.regexp.queries = append(r.regexp.queries, rr)
+		} else {
+			r.regexp.path = rr
+		}
+	}
+	r.addMatcher(rr)
+	return nil
+}
+
+// Headers --------------------------------------------------------------------
+
+// headerMatcher matches the request against header values.
+type headerMatcher map[string]string
+
+func (m headerMatcher) Match(r *http.Request, match *RouteMatch) bool {
+	return matchMap(m, r.Header, true)
+}
+
+// Headers adds a matcher for request header values.
+// It accepts a sequence of key/value pairs to be matched. For example:
+//
+//     r := mux.NewRouter()
+//     r.Headers("Content-Type", "application/json",
+//               "X-Requested-With", "XMLHttpRequest")
+//
+// The above route will only match if both request header values match.
+//
+// If the value is an empty string, it will match any value if the key is set.
+func (r *Route) Headers(pairs ...string) *Route {
+	if r.err == nil {
+		var headers map[string]string
+		headers, r.err = mapFromPairs(pairs...)
+		return r.addMatcher(headerMatcher(headers))
+	}
+	return r
+}
+
+// Host -----------------------------------------------------------------------
+
+// Host adds a matcher for the URL host.
+// It accepts a template with zero or more URL variables enclosed by {}.
+// Variables can define an optional regexp pattern to be matched:
+//
+// - {name} matches anything until the next dot.
+//
+// - {name:pattern} matches the given regexp pattern.
+//
+// For example:
+//
+//     r := mux.NewRouter()
+//     r.Host("www.domain.com")
+//     r.Host("{subdomain}.domain.com")
+//     r.Host("{subdomain:[a-z]+}.domain.com")
+//
+// Variable names must be unique in a given route. They can be retrieved
+// by calling mux.Vars(request).
+func (r *Route) Host(tpl string) *Route {
+	r.err = r.addRegexpMatcher(tpl, true, false, false)
+	return r
+}
+
+// MatcherFunc ----------------------------------------------------------------
+
+// MatcherFunc is the function signature used by custom matchers.
+type MatcherFunc func(*http.Request, *RouteMatch) bool
+
+func (m MatcherFunc) Match(r *http.Request, match *RouteMatch) bool {
+	return m(r, match)
+}
+
+// MatcherFunc adds a custom function to be used as request matcher.
+func (r *Route) MatcherFunc(f MatcherFunc) *Route {
+	return r.addMatcher(f)
+}
+
+// Methods --------------------------------------------------------------------
+
+// methodMatcher matches the request against HTTP methods.
+type methodMatcher []string
+
+func (m methodMatcher) Match(r *http.Request, match *RouteMatch) bool {
+	return matchInArray(m, r.Method)
+}
+
+// Methods adds a matcher for HTTP methods.
+// It accepts a sequence of one or more methods to be matched, e.g.:
+// "GET", "POST", "PUT".
+func (r *Route) Methods(methods ...string) *Route {
+	for k, v := range methods {
+		methods[k] = strings.ToUpper(v)
+	}
+	return r.addMatcher(methodMatcher(methods))
+}
+
+// Path -----------------------------------------------------------------------
+
+// Path adds a matcher for the URL path.
+// It accepts a template with zero or more URL variables enclosed by {}. The
+// template must start with a "/".
+// Variables can define an optional regexp pattern to be matched:
+//
+// - {name} matches anything until the next slash.
+//
+// - {name:pattern} matches the given regexp pattern.
+//
+// For example:
+//
+//     r := mux.NewRouter()
+//     r.Path("/products/").Handler(ProductsHandler)
+//     r.Path("/products/{key}").Handler(ProductsHandler)
+//     r.Path("/articles/{category}/{id:[0-9]+}").
+//       Handler(ArticleHandler)
+//
+// Variable names must be unique in a given route. They can be retrieved
+// by calling mux.Vars(request).
+func (r *Route) Path(tpl string) *Route {
+	r.err = r.addRegexpMatcher(tpl, false, false, false)
+	return r
+}
+
+// PathPrefix -----------------------------------------------------------------
+
+// PathPrefix adds a matcher for the URL path prefix. This matches if the given
+// template is a prefix of the full URL path. See Route.Path() for details on
+// the tpl argument.
+//
+// Note that it does not treat slashes specially ("/foobar/" will be matched by
+// the prefix "/foo") so you may want to use a trailing slash here.
+//
+// Also note that the setting of Router.StrictSlash() has no effect on routes
+// with a PathPrefix matcher.
+func (r *Route) PathPrefix(tpl string) *Route {
+	r.err = r.addRegexpMatcher(tpl, false, true, false)
+	return r
+}
+
+// Query ----------------------------------------------------------------------
+
+// Queries adds a matcher for URL query values.
+// It accepts a sequence of key/value pairs. Values may define variables.
+// For example:
+//
+//     r := mux.NewRouter()
+//     r.Queries("foo", "bar", "id", "{id:[0-9]+}")
+//
+// The above route will only match if the URL contains the defined query
+// values, e.g.: ?foo=bar&id=42.
+//
+// If the value is an empty string, it will match any value if the key is set.
+//
+// Variables can define an optional regexp pattern to be matched:
+//
+// - {name} matches anything up to the next '&' or '?'.
+//
+// - {name:pattern} matches the given regexp pattern.
+func (r *Route) Queries(pairs ...string) *Route {
+	length := len(pairs)
+	if length%2 != 0 {
+		r.err = fmt.Errorf(
+			"mux: number of parameters must be multiple of 2, got %v", pairs)
+		return nil
+	}
+	for i := 0; i < length; i += 2 {
+		if r.err = r.addRegexpMatcher(pairs[i]+"="+pairs[i+1], false, true, true); r.err != nil {
+			return r
+		}
+	}
+
+	return r
+}
+
+// Schemes --------------------------------------------------------------------
+
+// schemeMatcher matches the request against URL schemes.
+type schemeMatcher []string
+
+func (m schemeMatcher) Match(r *http.Request, match *RouteMatch) bool {
+	return matchInArray(m, r.URL.Scheme)
+}
+
+// Schemes adds a matcher for URL schemes.
+// It accepts a sequence of schemes to be matched, e.g.: "http", "https".
+func (r *Route) Schemes(schemes ...string) *Route {
+	for k, v := range schemes {
+		schemes[k] = strings.ToLower(v)
+	}
+	return r.addMatcher(schemeMatcher(schemes))
+}
+
+// Subrouter ------------------------------------------------------------------
+
+// Subrouter creates a subrouter for the route.
+//
+// It will test the inner routes only if the parent route matched. For example:
+//
+//     r := mux.NewRouter()
+//     s := r.Host("www.domain.com").Subrouter()
+//     s.HandleFunc("/products/", ProductsHandler)
+//     s.HandleFunc("/products/{key}", ProductHandler)
+//     s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
+//
+// Here, the routes registered in the subrouter won't be tested if the host
+// doesn't match.
+func (r *Route) Subrouter() *Router {
+	router := &Router{parent: r, strictSlash: r.strictSlash}
+	r.addMatcher(router)
+	return router
+}
+
+// ----------------------------------------------------------------------------
+// URL building
+// ----------------------------------------------------------------------------
+
+// URL builds a URL for the route.
+//
+// It accepts a sequence of key/value pairs for the route variables. For
+// example, given this route:
+//
+//     r := mux.NewRouter()
+//     r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
+//       Name("article")
+//
+// ...a URL for it can be built using:
+//
+//     url, err := r.Get("article").URL("category", "technology", "id", "42")
+//
+// ...which will return a url.URL with the following path:
+//
+//     "/articles/technology/42"
+//
+// This also works for host variables:
+//
+//     r := mux.NewRouter()
+//     r.Host("{subdomain}.domain.com").
+//       HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
+//       Name("article")
+//
+//     // url.String() will be "http://news.domain.com/articles/technology/42"
+//     url, err := r.Get("article").URL("subdomain", "news",
+//                                      "category", "technology",
+//                                      "id", "42")
+//
+// All variables defined in the route are required, and their values must
+// conform to the corresponding patterns.
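+// If a variable is missing from the pairs, or a value fails its validator
+// pattern, an error is returned instead of a URL.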
+func (r *Route) URL(pairs ...string) (*url.URL, error) { + if r.err != nil { + return nil, r.err + } + if r.regexp == nil { + return nil, errors.New("mux: route doesn't have a host or path") + } + var scheme, host, path string + var err error + if r.regexp.host != nil { + // Set a default scheme. + scheme = "http" + if host, err = r.regexp.host.url(pairs...); err != nil { + return nil, err + } + } + if r.regexp.path != nil { + if path, err = r.regexp.path.url(pairs...); err != nil { + return nil, err + } + } + return &url.URL{ + Scheme: scheme, + Host: host, + Path: path, + }, nil +} + +// URLHost builds the host part of the URL for a route. See Route.URL(). +// +// The route must have a host defined. +func (r *Route) URLHost(pairs ...string) (*url.URL, error) { + if r.err != nil { + return nil, r.err + } + if r.regexp == nil || r.regexp.host == nil { + return nil, errors.New("mux: route doesn't have a host") + } + host, err := r.regexp.host.url(pairs...) + if err != nil { + return nil, err + } + return &url.URL{ + Scheme: "http", + Host: host, + }, nil +} + +// URLPath builds the path part of the URL for a route. See Route.URL(). +// +// The route must have a path defined. +func (r *Route) URLPath(pairs ...string) (*url.URL, error) { + if r.err != nil { + return nil, r.err + } + if r.regexp == nil || r.regexp.path == nil { + return nil, errors.New("mux: route doesn't have a path") + } + path, err := r.regexp.path.url(pairs...) + if err != nil { + return nil, err + } + return &url.URL{ + Path: path, + }, nil +} + +// ---------------------------------------------------------------------------- +// parentRoute +// ---------------------------------------------------------------------------- + +// parentRoute allows routes to know about parent host and path definitions. +type parentRoute interface { + getNamedRoutes() map[string]*Route + getRegexpGroup() *routeRegexpGroup +} + +// getNamedRoutes returns the map where named routes are registered. +func (r *Route) getNamedRoutes() map[string]*Route { + if r.parent == nil { + // During tests router is not always set. + r.parent = NewRouter() + } + return r.parent.getNamedRoutes() +} + +// getRegexpGroup returns regexp definitions from this route. +func (r *Route) getRegexpGroup() *routeRegexpGroup { + if r.regexp == nil { + if r.parent == nil { + // During tests router is not always set. + r.parent = NewRouter() + } + regexp := r.parent.getRegexpGroup() + if regexp == nil { + r.regexp = new(routeRegexpGroup) + } else { + // Copy. + r.regexp = &routeRegexpGroup{ + host: regexp.host, + path: regexp.path, + queries: regexp.queries, + } + } + } + return r.regexp +} diff --git a/vendor/github.com/mattn/go-isatty/LICENSE b/vendor/github.com/mattn/go-isatty/LICENSE new file mode 100644 index 0000000..65dc692 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/LICENSE @@ -0,0 +1,9 @@ +Copyright (c) Yasuhiro MATSUMOTO + +MIT License (Expat) + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/mattn/go-isatty/README.md b/vendor/github.com/mattn/go-isatty/README.md
new file mode 100644
index 0000000..1e69004
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/README.md
@@ -0,0 +1,50 @@
+# go-isatty
+
+[![Godoc Reference](https://godoc.org/github.com/mattn/go-isatty?status.svg)](http://godoc.org/github.com/mattn/go-isatty)
+[![Build Status](https://travis-ci.org/mattn/go-isatty.svg?branch=master)](https://travis-ci.org/mattn/go-isatty)
+[![Coverage Status](https://coveralls.io/repos/github/mattn/go-isatty/badge.svg?branch=master)](https://coveralls.io/github/mattn/go-isatty?branch=master)
+[![Go Report Card](https://goreportcard.com/badge/mattn/go-isatty)](https://goreportcard.com/report/mattn/go-isatty)
+
+isatty for golang
+
+## Usage
+
+```go
+package main
+
+import (
+	"fmt"
+	"github.com/mattn/go-isatty"
+	"os"
+)
+
+func main() {
+	if isatty.IsTerminal(os.Stdout.Fd()) {
+		fmt.Println("Is Terminal")
+	} else if isatty.IsCygwinTerminal(os.Stdout.Fd()) {
+		fmt.Println("Is Cygwin/MSYS2 Terminal")
+	} else {
+		fmt.Println("Is Not Terminal")
+	}
+}
+```
+
+## Installation
+
+```
+$ go get github.com/mattn/go-isatty
+```
+
+## License
+
+MIT
+
+## Author
+
+Yasuhiro Matsumoto (a.k.a mattn)
+
+## Thanks
+
+* k-takata: base idea for IsCygwinTerminal
+
+  https://github.com/k-takata/go-iscygpty
diff --git a/vendor/github.com/mattn/go-isatty/doc.go b/vendor/github.com/mattn/go-isatty/doc.go
new file mode 100644
index 0000000..17d4f90
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/doc.go
@@ -0,0 +1,2 @@
+// Package isatty implements an interface to isatty
+package isatty
diff --git a/vendor/github.com/mattn/go-isatty/isatty_appengine.go b/vendor/github.com/mattn/go-isatty/isatty_appengine.go
new file mode 100644
index 0000000..9584a98
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/isatty_appengine.go
@@ -0,0 +1,15 @@
+// +build appengine
+
+package isatty
+
+// IsTerminal returns true if the file descriptor is a terminal, which
+// is always false on appengine classic, a sandboxed PaaS.
+func IsTerminal(fd uintptr) bool {
+	return false
+}
+
+// IsCygwinTerminal returns true if the file descriptor is a cygwin or msys2
+// terminal. This is always false in this environment.
+func IsCygwinTerminal(fd uintptr) bool {
+	return false
+}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_bsd.go b/vendor/github.com/mattn/go-isatty/isatty_bsd.go
new file mode 100644
index 0000000..42f2514
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/isatty_bsd.go
@@ -0,0 +1,18 @@
+// +build darwin freebsd openbsd netbsd dragonfly
+// +build !appengine
+
+package isatty
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+const ioctlReadTermios = syscall.TIOCGETA
+
+// IsTerminal returns true if the file descriptor is a terminal.
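+// It does so by issuing the TIOCGETA ioctl, which only succeeds when the
+// descriptor refers to a tty device.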
+func IsTerminal(fd uintptr) bool {
+	var termios syscall.Termios
+	_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
+	return err == 0
+}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_linux.go b/vendor/github.com/mattn/go-isatty/isatty_linux.go
new file mode 100644
index 0000000..7384cf9
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/isatty_linux.go
@@ -0,0 +1,18 @@
+// +build linux
+// +build !appengine,!ppc64,!ppc64le
+
+package isatty
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+const ioctlReadTermios = syscall.TCGETS
+
+// IsTerminal returns true if the file descriptor is a terminal.
+func IsTerminal(fd uintptr) bool {
+	var termios syscall.Termios
+	_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
+	return err == 0
+}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_linux_ppc64x.go b/vendor/github.com/mattn/go-isatty/isatty_linux_ppc64x.go
new file mode 100644
index 0000000..44e5d21
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/isatty_linux_ppc64x.go
@@ -0,0 +1,19 @@
+// +build linux
+// +build ppc64 ppc64le
+
+package isatty
+
+import (
+	"unsafe"
+
+	syscall "golang.org/x/sys/unix"
+)
+
+const ioctlReadTermios = syscall.TCGETS
+
+// IsTerminal returns true if the file descriptor is a terminal.
+func IsTerminal(fd uintptr) bool {
+	var termios syscall.Termios
+	_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
+	return err == 0
+}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_others.go b/vendor/github.com/mattn/go-isatty/isatty_others.go
new file mode 100644
index 0000000..9d8b4a5
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/isatty_others.go
@@ -0,0 +1,10 @@
+// +build !windows
+// +build !appengine
+
+package isatty
+
+// IsCygwinTerminal returns true if the file descriptor is a cygwin or msys2
+// terminal. This is always false on platforms other than Windows.
+func IsCygwinTerminal(fd uintptr) bool {
+	return false
+}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_solaris.go b/vendor/github.com/mattn/go-isatty/isatty_solaris.go
new file mode 100644
index 0000000..1f0c6bf
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/isatty_solaris.go
@@ -0,0 +1,16 @@
+// +build solaris
+// +build !appengine
+
+package isatty
+
+// IsTerminal returns true if the given file descriptor is a terminal.
+// see: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libbc/libc/gen/common/isatty.c
+func IsTerminal(fd uintptr) bool {
+	var termio unix.Termio
+	err := unix.IoctlSetTermio(int(fd), unix.TCGETA, &termio)
+	return err == nil
+}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_windows.go b/vendor/github.com/mattn/go-isatty/isatty_windows.go
new file mode 100644
index 0000000..af51cbc
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/isatty_windows.go
@@ -0,0 +1,94 @@
+// +build windows
+// +build !appengine
+
+package isatty
+
+import (
+	"strings"
+	"syscall"
+	"unicode/utf16"
+	"unsafe"
+)
+
+const (
+	fileNameInfo uintptr = 2
+	fileTypePipe         = 3
+)
+
+var (
+	kernel32                         = syscall.NewLazyDLL("kernel32.dll")
+	procGetConsoleMode               = kernel32.NewProc("GetConsoleMode")
+	procGetFileInformationByHandleEx = kernel32.NewProc("GetFileInformationByHandleEx")
+	procGetFileType                  = kernel32.NewProc("GetFileType")
+)
+
+func init() {
+	// Check if GetFileInformationByHandleEx is available.
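+	// Find resolves the symbol lazily; a non-nil error means this Windows
+	// version does not export the API, so the Cygwin pty check is disabled.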
+ if procGetFileInformationByHandleEx.Find() != nil { + procGetFileInformationByHandleEx = nil + } +} + +// IsTerminal return true if the file descriptor is terminal. +func IsTerminal(fd uintptr) bool { + var st uint32 + r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0) + return r != 0 && e == 0 +} + +// Check pipe name is used for cygwin/msys2 pty. +// Cygwin/MSYS2 PTY has a name like: +// \{cygwin,msys}-XXXXXXXXXXXXXXXX-ptyN-{from,to}-master +func isCygwinPipeName(name string) bool { + token := strings.Split(name, "-") + if len(token) < 5 { + return false + } + + if token[0] != `\msys` && token[0] != `\cygwin` { + return false + } + + if token[1] == "" { + return false + } + + if !strings.HasPrefix(token[2], "pty") { + return false + } + + if token[3] != `from` && token[3] != `to` { + return false + } + + if token[4] != "master" { + return false + } + + return true +} + +// IsCygwinTerminal() return true if the file descriptor is a cygwin or msys2 +// terminal. +func IsCygwinTerminal(fd uintptr) bool { + if procGetFileInformationByHandleEx == nil { + return false + } + + // Cygwin/msys's pty is a pipe. + ft, _, e := syscall.Syscall(procGetFileType.Addr(), 1, fd, 0, 0) + if ft != fileTypePipe || e != 0 { + return false + } + + var buf [2 + syscall.MAX_PATH]uint16 + r, _, e := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), + 4, fd, fileNameInfo, uintptr(unsafe.Pointer(&buf)), + uintptr(len(buf)*2), 0, 0) + if r == 0 || e != 0 { + return false + } + + l := *(*uint32)(unsafe.Pointer(&buf)) + return isCygwinPipeName(string(utf16.Decode(buf[2 : 2+l/2]))) +} diff --git a/vendor/github.com/mistifyio/go-zfs/LICENSE b/vendor/github.com/mistifyio/go-zfs/LICENSE new file mode 100644 index 0000000..f4c265c --- /dev/null +++ b/vendor/github.com/mistifyio/go-zfs/LICENSE @@ -0,0 +1,201 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright (c) 2014, OmniTI Computer Consulting, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/vendor/github.com/mistifyio/go-zfs/README.md b/vendor/github.com/mistifyio/go-zfs/README.md new file mode 100644 index 0000000..2515e58 --- /dev/null +++ b/vendor/github.com/mistifyio/go-zfs/README.md @@ -0,0 +1,54 @@ +# Go Wrapper for ZFS # + +Simple wrappers for ZFS command line tools. + +[![GoDoc](https://godoc.org/github.com/mistifyio/go-zfs?status.svg)](https://godoc.org/github.com/mistifyio/go-zfs) + +## Requirements ## + +You need a working ZFS setup. To use on Ubuntu 14.04, setup ZFS: + + sudo apt-get install python-software-properties + sudo apt-add-repository ppa:zfs-native/stable + sudo apt-get update + sudo apt-get install ubuntu-zfs libzfs-dev + +Developed using Go 1.3, but currently there isn't anything 1.3 specific. Don't use Ubuntu packages for Go, use http://golang.org/doc/install + +Generally you need root privileges to use anything zfs related. 
+
+## Status ##
+
+This has only been tested on Ubuntu 14.04.
+
+In the future, we hope to work directly with libzfs.
+
+# Hacking #
+
+The tests have decent examples for most functions.
+
+```go
+//assuming a zpool named test
+//error handling omitted
+
+
+f, err := zfs.CreateFilesystem("test/snapshot-test", nil)
+ok(t, err)
+
+s, err := f.Snapshot("test", nil)
+ok(t, err)
+
+// snapshot is named "test/snapshot-test@test"
+
+c, err := s.Clone("test/clone-test", nil)
+
+err := c.Destroy()
+err := s.Destroy()
+err := f.Destroy()
+
+```
+
+# Contributing #
+
+See the [contributing guidelines](./CONTRIBUTING.md)
+
diff --git a/vendor/github.com/mistifyio/go-zfs/error.go b/vendor/github.com/mistifyio/go-zfs/error.go
new file mode 100644
index 0000000..5408ccd
--- /dev/null
+++ b/vendor/github.com/mistifyio/go-zfs/error.go
@@ -0,0 +1,18 @@
+package zfs
+
+import (
+	"fmt"
+)
+
+// Error is an error which is returned when the `zfs` or `zpool` shell
+// commands return with a non-zero exit code.
+type Error struct {
+	Err    error
+	Debug  string
+	Stderr string
+}
+
+// Error returns the string representation of an Error.
+func (e Error) Error() string {
+	return fmt.Sprintf("%s: %q => %s", e.Err, e.Debug, e.Stderr)
+}
diff --git a/vendor/github.com/mistifyio/go-zfs/utils.go b/vendor/github.com/mistifyio/go-zfs/utils.go
new file mode 100644
index 0000000..c7178d6
--- /dev/null
+++ b/vendor/github.com/mistifyio/go-zfs/utils.go
@@ -0,0 +1,352 @@
+package zfs
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"os/exec"
+	"regexp"
+	"runtime"
+	"strconv"
+	"strings"
+
+	"github.com/pborman/uuid"
+)
+
+type command struct {
+	Command string
+	Stdin   io.Reader
+	Stdout  io.Writer
+}
+
+func (c *command) Run(arg ...string) ([][]string, error) {
+
+	cmd := exec.Command(c.Command, arg...)
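+	// Callers may supply their own stdin/stdout streams; stderr is always
+	// captured so it can be reported in a zfs.Error on failure.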
+ + var stdout, stderr bytes.Buffer + + if c.Stdout == nil { + cmd.Stdout = &stdout + } else { + cmd.Stdout = c.Stdout + } + + if c.Stdin != nil { + cmd.Stdin = c.Stdin + + } + cmd.Stderr = &stderr + + id := uuid.New() + joinedArgs := strings.Join(cmd.Args, " ") + + logger.Log([]string{"ID:" + id, "START", joinedArgs}) + err := cmd.Run() + logger.Log([]string{"ID:" + id, "FINISH"}) + + if err != nil { + return nil, &Error{ + Err: err, + Debug: strings.Join([]string{cmd.Path, joinedArgs}, " "), + Stderr: stderr.String(), + } + } + + // assume if you passed in something for stdout, that you know what to do with it + if c.Stdout != nil { + return nil, nil + } + + lines := strings.Split(stdout.String(), "\n") + + //last line is always blank + lines = lines[0 : len(lines)-1] + output := make([][]string, len(lines)) + + for i, l := range lines { + output[i] = strings.Fields(l) + } + + return output, nil +} + +func setString(field *string, value string) { + v := "" + if value != "-" { + v = value + } + *field = v +} + +func setUint(field *uint64, value string) error { + var v uint64 + if value != "-" { + var err error + v, err = strconv.ParseUint(value, 10, 64) + if err != nil { + return err + } + } + *field = v + return nil +} + +func (ds *Dataset) parseLine(line []string) error { + var err error + + if len(line) != len(dsPropList) { + return errors.New("Output does not match what is expected on this platform") + } + setString(&ds.Name, line[0]) + setString(&ds.Origin, line[1]) + + if err = setUint(&ds.Used, line[2]); err != nil { + return err + } + if err = setUint(&ds.Avail, line[3]); err != nil { + return err + } + + setString(&ds.Mountpoint, line[4]) + setString(&ds.Compression, line[5]) + setString(&ds.Type, line[6]) + + if err = setUint(&ds.Volsize, line[7]); err != nil { + return err + } + if err = setUint(&ds.Quota, line[8]); err != nil { + return err + } + + if runtime.GOOS == "solaris" { + return nil + } + + if err = setUint(&ds.Written, line[9]); err != nil { + return err + } + if err = setUint(&ds.Logicalused, line[10]); err != nil { + return err + } + if err = setUint(&ds.Usedbydataset, line[11]); err != nil { + return err + } + return nil +} + +/* + * from zfs diff`s escape function: + * + * Prints a file name out a character at a time. If the character is + * not in the range of what we consider "printable" ASCII, display it + * as an escaped 3-digit octal value. ASCII values less than a space + * are all control characters and we declare the upper end as the + * DELete character. This also is the last 7-bit ASCII character. + * We choose to treat all 8-bit ASCII as not printable for this + * application. 
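+ *
+ * unescapeFilepath below reverses that escaping, decoding each "\ooo"
+ * sequence back into the raw byte it represents.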
+ */ +func unescapeFilepath(path string) (string, error) { + buf := make([]byte, 0, len(path)) + llen := len(path) + for i := 0; i < llen; { + if path[i] == '\\' { + if llen < i+4 { + return "", fmt.Errorf("Invalid octal code: too short") + } + octalCode := path[(i + 1):(i + 4)] + val, err := strconv.ParseUint(octalCode, 8, 8) + if err != nil { + return "", fmt.Errorf("Invalid octal code: %v", err) + } + buf = append(buf, byte(val)) + i += 4 + } else { + buf = append(buf, path[i]) + i++ + } + } + return string(buf), nil +} + +var changeTypeMap = map[string]ChangeType{ + "-": Removed, + "+": Created, + "M": Modified, + "R": Renamed, +} +var inodeTypeMap = map[string]InodeType{ + "B": BlockDevice, + "C": CharacterDevice, + "/": Directory, + ">": Door, + "|": NamedPipe, + "@": SymbolicLink, + "P": EventPort, + "=": Socket, + "F": File, +} + +// matches (+1) or (-1) +var referenceCountRegex = regexp.MustCompile("\\(([+-]\\d+?)\\)") + +func parseReferenceCount(field string) (int, error) { + matches := referenceCountRegex.FindStringSubmatch(field) + if matches == nil { + return 0, fmt.Errorf("Regexp does not match") + } + return strconv.Atoi(matches[1]) +} + +func parseInodeChange(line []string) (*InodeChange, error) { + llen := len(line) + if llen < 1 { + return nil, fmt.Errorf("Empty line passed") + } + + changeType := changeTypeMap[line[0]] + if changeType == 0 { + return nil, fmt.Errorf("Unknown change type '%s'", line[0]) + } + + switch changeType { + case Renamed: + if llen != 4 { + return nil, fmt.Errorf("Mismatching number of fields: expect 4, got: %d", llen) + } + case Modified: + if llen != 4 && llen != 3 { + return nil, fmt.Errorf("Mismatching number of fields: expect 3..4, got: %d", llen) + } + default: + if llen != 3 { + return nil, fmt.Errorf("Mismatching number of fields: expect 3, got: %d", llen) + } + } + + inodeType := inodeTypeMap[line[1]] + if inodeType == 0 { + return nil, fmt.Errorf("Unknown inode type '%s'", line[1]) + } + + path, err := unescapeFilepath(line[2]) + if err != nil { + return nil, fmt.Errorf("Failed to parse filename: %v", err) + } + + var newPath string + var referenceCount int + switch changeType { + case Renamed: + newPath, err = unescapeFilepath(line[3]) + if err != nil { + return nil, fmt.Errorf("Failed to parse filename: %v", err) + } + case Modified: + if llen == 4 { + referenceCount, err = parseReferenceCount(line[3]) + if err != nil { + return nil, fmt.Errorf("Failed to parse reference count: %v", err) + } + } + default: + newPath = "" + } + + return &InodeChange{ + Change: changeType, + Type: inodeType, + Path: path, + NewPath: newPath, + ReferenceCountChange: referenceCount, + }, nil +} + +// example input +//M / /testpool/bar/ +//+ F /testpool/bar/hello.txt +//M / /testpool/bar/hello.txt (+1) +//M / /testpool/bar/hello-hardlink +func parseInodeChanges(lines [][]string) ([]*InodeChange, error) { + changes := make([]*InodeChange, len(lines)) + + for i, line := range lines { + c, err := parseInodeChange(line) + if err != nil { + return nil, fmt.Errorf("Failed to parse line %d of zfs diff: %v, got: '%s'", i, err, line) + } + changes[i] = c + } + return changes, nil +} + +func listByType(t, filter string) ([]*Dataset, error) { + args := []string{"list", "-rHp", "-t", t, "-o", dsPropListOptions} + + if filter != "" { + args = append(args, filter) + } + out, err := zfs(args...) 
+	if err != nil {
+		return nil, err
+	}
+
+	var datasets []*Dataset
+
+	name := ""
+	var ds *Dataset
+	for _, line := range out {
+		if name != line[0] {
+			name = line[0]
+			ds = &Dataset{Name: name}
+			datasets = append(datasets, ds)
+		}
+		if err := ds.parseLine(line); err != nil {
+			return nil, err
+		}
+	}
+
+	return datasets, nil
+}
+
+func propsSlice(properties map[string]string) []string {
+	args := make([]string, 0, len(properties)*3)
+	for k, v := range properties {
+		args = append(args, "-o")
+		args = append(args, fmt.Sprintf("%s=%s", k, v))
+	}
+	return args
+}
+
+func (z *Zpool) parseLine(line []string) error {
+	prop := line[1]
+	val := line[2]
+
+	var err error
+
+	switch prop {
+	case "name":
+		setString(&z.Name, val)
+	case "health":
+		setString(&z.Health, val)
+	case "allocated":
+		err = setUint(&z.Allocated, val)
+	case "size":
+		err = setUint(&z.Size, val)
+	case "free":
+		err = setUint(&z.Free, val)
+	case "fragmentation":
+		// Trim trailing "%" before parsing uint
+		err = setUint(&z.Fragmentation, val[:len(val)-1])
+	case "readonly":
+		z.ReadOnly = val == "on"
+	case "freeing":
+		err = setUint(&z.Freeing, val)
+	case "leaked":
+		err = setUint(&z.Leaked, val)
+	case "dedupratio":
+		// Trim trailing "x" before parsing float64
+		z.DedupRatio, err = strconv.ParseFloat(val[:len(val)-1], 64)
+	}
+	return err
+}
diff --git a/vendor/github.com/mistifyio/go-zfs/utils_notsolaris.go b/vendor/github.com/mistifyio/go-zfs/utils_notsolaris.go
new file mode 100644
index 0000000..b2b0cd9
--- /dev/null
+++ b/vendor/github.com/mistifyio/go-zfs/utils_notsolaris.go
@@ -0,0 +1,17 @@
+// +build !solaris
+
+package zfs
+
+import (
+	"strings"
+)
+
+// List of ZFS properties to retrieve from zfs list command on a non-Solaris platform
+var dsPropList = []string{"name", "origin", "used", "available", "mountpoint", "compression", "type", "volsize", "quota", "written", "logicalused", "usedbydataset"}
+
+var dsPropListOptions = strings.Join(dsPropList, ",")
+
+// List of Zpool properties to retrieve from zpool list command on a non-Solaris platform
+var zpoolPropList = []string{"name", "health", "allocated", "size", "free", "readonly", "dedupratio", "fragmentation", "freeing", "leaked"}
+var zpoolPropListOptions = strings.Join(zpoolPropList, ",")
+var zpoolArgs = []string{"get", "-p", zpoolPropListOptions}
diff --git a/vendor/github.com/mistifyio/go-zfs/utils_solaris.go b/vendor/github.com/mistifyio/go-zfs/utils_solaris.go
new file mode 100644
index 0000000..4b0a9e9
--- /dev/null
+++ b/vendor/github.com/mistifyio/go-zfs/utils_solaris.go
@@ -0,0 +1,17 @@
+// +build solaris
+
+package zfs
+
+import (
+	"strings"
+)
+
+// List of ZFS properties to retrieve from zfs list command on a Solaris platform
+var dsPropList = []string{"name", "origin", "used", "available", "mountpoint", "compression", "type", "volsize", "quota"}
+
+var dsPropListOptions = strings.Join(dsPropList, ",")
+
+// List of Zpool properties to retrieve from zpool list command on a Solaris platform
+var zpoolPropList = []string{"name", "health", "allocated", "size", "free", "readonly", "dedupratio"}
+var zpoolPropListOptions = strings.Join(zpoolPropList, ",")
+var zpoolArgs = []string{"get", "-p", zpoolPropListOptions}
diff --git a/vendor/github.com/mistifyio/go-zfs/zfs.go b/vendor/github.com/mistifyio/go-zfs/zfs.go
new file mode 100644
index 0000000..615337d
--- /dev/null
+++ b/vendor/github.com/mistifyio/go-zfs/zfs.go
@@ -0,0 +1,451 @@
+// Package zfs provides wrappers around the ZFS command line tools.
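+//
+// All operations shell out to the zfs/zpool binaries via os/exec, so those
+// tools must be installed and will generally require root privileges.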
+package zfs + +import ( + "errors" + "fmt" + "io" + "strconv" + "strings" +) + +// ZFS dataset types, which can indicate if a dataset is a filesystem, +// snapshot, or volume. +const ( + DatasetFilesystem = "filesystem" + DatasetSnapshot = "snapshot" + DatasetVolume = "volume" +) + +// Dataset is a ZFS dataset. A dataset could be a clone, filesystem, snapshot, +// or volume. The Type struct member can be used to determine a dataset's type. +// +// The field definitions can be found in the ZFS manual: +// http://www.freebsd.org/cgi/man.cgi?zfs(8). +type Dataset struct { + Name string + Origin string + Used uint64 + Avail uint64 + Mountpoint string + Compression string + Type string + Written uint64 + Volsize uint64 + Logicalused uint64 + Usedbydataset uint64 + Quota uint64 +} + +// InodeType is the type of inode as reported by Diff +type InodeType int + +// Types of Inodes +const ( + _ = iota // 0 == unknown type + BlockDevice InodeType = iota + CharacterDevice + Directory + Door + NamedPipe + SymbolicLink + EventPort + Socket + File +) + +// ChangeType is the type of inode change as reported by Diff +type ChangeType int + +// Types of Changes +const ( + _ = iota // 0 == unknown type + Removed ChangeType = iota + Created + Modified + Renamed +) + +// DestroyFlag is the options flag passed to Destroy +type DestroyFlag int + +// Valid destroy options +const ( + DestroyDefault DestroyFlag = 1 << iota + DestroyRecursive = 1 << iota + DestroyRecursiveClones = 1 << iota + DestroyDeferDeletion = 1 << iota + DestroyForceUmount = 1 << iota +) + +// InodeChange represents a change as reported by Diff +type InodeChange struct { + Change ChangeType + Type InodeType + Path string + NewPath string + ReferenceCountChange int +} + +// Logger can be used to log commands/actions +type Logger interface { + Log(cmd []string) +} + +type defaultLogger struct{} + +func (*defaultLogger) Log(cmd []string) { + return +} + +var logger Logger = &defaultLogger{} + +// SetLogger set a log handler to log all commands including arguments before +// they are executed +func SetLogger(l Logger) { + if l != nil { + logger = l + } +} + +// zfs is a helper function to wrap typical calls to zfs. +func zfs(arg ...string) ([][]string, error) { + c := command{Command: "zfs"} + return c.Run(arg...) +} + +// Datasets returns a slice of ZFS datasets, regardless of type. +// A filter argument may be passed to select a dataset with the matching name, +// or empty string ("") may be used to select all datasets. +func Datasets(filter string) ([]*Dataset, error) { + return listByType("all", filter) +} + +// Snapshots returns a slice of ZFS snapshots. +// A filter argument may be passed to select a snapshot with the matching name, +// or empty string ("") may be used to select all snapshots. +func Snapshots(filter string) ([]*Dataset, error) { + return listByType(DatasetSnapshot, filter) +} + +// Filesystems returns a slice of ZFS filesystems. +// A filter argument may be passed to select a filesystem with the matching name, +// or empty string ("") may be used to select all filesystems. +func Filesystems(filter string) ([]*Dataset, error) { + return listByType(DatasetFilesystem, filter) +} + +// Volumes returns a slice of ZFS volumes. +// A filter argument may be passed to select a volume with the matching name, +// or empty string ("") may be used to select all volumes. +func Volumes(filter string) ([]*Dataset, error) { + return listByType(DatasetVolume, filter) +} + +// GetDataset retrieves a single ZFS dataset by name. 
This dataset could be +// any valid ZFS dataset type, such as a clone, filesystem, snapshot, or volume. +func GetDataset(name string) (*Dataset, error) { + out, err := zfs("list", "-Hp", "-o", dsPropListOptions, name) + if err != nil { + return nil, err + } + + ds := &Dataset{Name: name} + for _, line := range out { + if err := ds.parseLine(line); err != nil { + return nil, err + } + } + + return ds, nil +} + +// Clone clones a ZFS snapshot and returns a clone dataset. +// An error will be returned if the input dataset is not of snapshot type. +func (d *Dataset) Clone(dest string, properties map[string]string) (*Dataset, error) { + if d.Type != DatasetSnapshot { + return nil, errors.New("can only clone snapshots") + } + args := make([]string, 2, 4) + args[0] = "clone" + args[1] = "-p" + if properties != nil { + args = append(args, propsSlice(properties)...) + } + args = append(args, []string{d.Name, dest}...) + _, err := zfs(args...) + if err != nil { + return nil, err + } + return GetDataset(dest) +} + +// Unmount unmounts currently mounted ZFS file systems. +func (d *Dataset) Unmount(force bool) (*Dataset, error) { + if d.Type == DatasetSnapshot { + return nil, errors.New("cannot unmount snapshots") + } + args := make([]string, 1, 3) + args[0] = "umount" + if force { + args = append(args, "-f") + } + args = append(args, d.Name) + _, err := zfs(args...) + if err != nil { + return nil, err + } + return GetDataset(d.Name) +} + +// Mount mounts ZFS file systems. +func (d *Dataset) Mount(overlay bool, options []string) (*Dataset, error) { + if d.Type == DatasetSnapshot { + return nil, errors.New("cannot mount snapshots") + } + args := make([]string, 1, 5) + args[0] = "mount" + if overlay { + args = append(args, "-O") + } + if options != nil { + args = append(args, "-o") + args = append(args, strings.Join(options, ",")) + } + args = append(args, d.Name) + _, err := zfs(args...) + if err != nil { + return nil, err + } + return GetDataset(d.Name) +} + +// ReceiveSnapshot receives a ZFS stream from the input io.Reader, creates a +// new snapshot with the specified name, and streams the input data into the +// newly-created snapshot. +func ReceiveSnapshot(input io.Reader, name string) (*Dataset, error) { + c := command{Command: "zfs", Stdin: input} + _, err := c.Run("receive", name) + if err != nil { + return nil, err + } + return GetDataset(name) +} + +// SendSnapshot sends a ZFS stream of a snapshot to the input io.Writer. +// An error will be returned if the input dataset is not of snapshot type. +func (d *Dataset) SendSnapshot(output io.Writer) error { + if d.Type != DatasetSnapshot { + return errors.New("can only send snapshots") + } + + c := command{Command: "zfs", Stdout: output} + _, err := c.Run("send", d.Name) + return err +} + +// CreateVolume creates a new ZFS volume with the specified name, size, and +// properties. +// A full list of available ZFS properties may be found here: +// https://www.freebsd.org/cgi/man.cgi?zfs(8). +func CreateVolume(name string, size uint64, properties map[string]string) (*Dataset, error) { + args := make([]string, 4, 5) + args[0] = "create" + args[1] = "-p" + args[2] = "-V" + args[3] = strconv.FormatUint(size, 10) + if properties != nil { + args = append(args, propsSlice(properties)...) + } + args = append(args, name) + _, err := zfs(args...) + if err != nil { + return nil, err + } + return GetDataset(name) +} + +// Destroy destroys a ZFS dataset. 
If the destroy bit flag is set, any +// descendents of the dataset will be recursively destroyed, including snapshots. +// If the deferred bit flag is set, the snapshot is marked for deferred +// deletion. +func (d *Dataset) Destroy(flags DestroyFlag) error { + args := make([]string, 1, 3) + args[0] = "destroy" + if flags&DestroyRecursive != 0 { + args = append(args, "-r") + } + + if flags&DestroyRecursiveClones != 0 { + args = append(args, "-R") + } + + if flags&DestroyDeferDeletion != 0 { + args = append(args, "-d") + } + + if flags&DestroyForceUmount != 0 { + args = append(args, "-f") + } + + args = append(args, d.Name) + _, err := zfs(args...) + return err +} + +// SetProperty sets a ZFS property on the receiving dataset. +// A full list of available ZFS properties may be found here: +// https://www.freebsd.org/cgi/man.cgi?zfs(8). +func (d *Dataset) SetProperty(key, val string) error { + prop := strings.Join([]string{key, val}, "=") + _, err := zfs("set", prop, d.Name) + return err +} + +// GetProperty returns the current value of a ZFS property from the +// receiving dataset. +// A full list of available ZFS properties may be found here: +// https://www.freebsd.org/cgi/man.cgi?zfs(8). +func (d *Dataset) GetProperty(key string) (string, error) { + out, err := zfs("get", key, d.Name) + if err != nil { + return "", err + } + + return out[0][2], nil +} + +// Rename renames a dataset. +func (d *Dataset) Rename(name string, createParent bool, recursiveRenameSnapshots bool) (*Dataset, error) { + args := make([]string, 3, 5) + args[0] = "rename" + args[1] = d.Name + args[2] = name + if createParent { + args = append(args, "-p") + } + if recursiveRenameSnapshots { + args = append(args, "-r") + } + _, err := zfs(args...) + if err != nil { + return d, err + } + + return GetDataset(name) +} + +// Snapshots returns a slice of all ZFS snapshots of a given dataset. +func (d *Dataset) Snapshots() ([]*Dataset, error) { + return Snapshots(d.Name) +} + +// CreateFilesystem creates a new ZFS filesystem with the specified name and +// properties. +// A full list of available ZFS properties may be found here: +// https://www.freebsd.org/cgi/man.cgi?zfs(8). +func CreateFilesystem(name string, properties map[string]string) (*Dataset, error) { + args := make([]string, 1, 4) + args[0] = "create" + + if properties != nil { + args = append(args, propsSlice(properties)...) + } + + args = append(args, name) + _, err := zfs(args...) + if err != nil { + return nil, err + } + return GetDataset(name) +} + +// Snapshot creates a new ZFS snapshot of the receiving dataset, using the +// specified name. Optionally, the snapshot can be taken recursively, creating +// snapshots of all descendent filesystems in a single, atomic operation. +func (d *Dataset) Snapshot(name string, recursive bool) (*Dataset, error) { + args := make([]string, 1, 4) + args[0] = "snapshot" + if recursive { + args = append(args, "-r") + } + snapName := fmt.Sprintf("%s@%s", d.Name, name) + args = append(args, snapName) + _, err := zfs(args...) + if err != nil { + return nil, err + } + return GetDataset(snapName) +} + +// Rollback rolls back the receiving ZFS dataset to a previous snapshot. +// Optionally, intermediate snapshots can be destroyed. A ZFS snapshot +// rollback cannot be completed without this option, if more recent +// snapshots exist. +// An error will be returned if the input dataset is not of snapshot type. 
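+//
+// Illustrative usage (added for this document, not part of the vendored
+// go-zfs source; the dataset name is hypothetical):
+//
+//	snap, err := zfs.GetDataset("tank/data@before-upgrade")
+//	if err == nil {
+//		// true maps to zfs rollback -r: destroy snapshots more
+//		// recent than the target while rolling back
+//		err = snap.Rollback(true)
+//	}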
+func (d *Dataset) Rollback(destroyMoreRecent bool) error { + if d.Type != DatasetSnapshot { + return errors.New("can only rollback snapshots") + } + + args := make([]string, 1, 3) + args[0] = "rollback" + if destroyMoreRecent { + args = append(args, "-r") + } + args = append(args, d.Name) + + _, err := zfs(args...) + return err +} + +// Children returns a slice of children of the receiving ZFS dataset. +// A recursion depth may be specified, or a depth of 0 allows unlimited +// recursion. +func (d *Dataset) Children(depth uint64) ([]*Dataset, error) { + args := []string{"list"} + if depth > 0 { + args = append(args, "-d") + args = append(args, strconv.FormatUint(depth, 10)) + } else { + args = append(args, "-r") + } + args = append(args, "-t", "all", "-Hp", "-o", dsPropListOptions) + args = append(args, d.Name) + + out, err := zfs(args...) + if err != nil { + return nil, err + } + + var datasets []*Dataset + name := "" + var ds *Dataset + for _, line := range out { + if name != line[0] { + name = line[0] + ds = &Dataset{Name: name} + datasets = append(datasets, ds) + } + if err := ds.parseLine(line); err != nil { + return nil, err + } + } + return datasets[1:], nil +} + +// Diff returns changes between a snapshot and the given ZFS dataset. +// The snapshot name must include the filesystem part as it is possible to +// compare clones with their origin snapshots. +func (d *Dataset) Diff(snapshot string) ([]*InodeChange, error) { + args := []string{"diff", "-FH", snapshot, d.Name}[:] + out, err := zfs(args...) + if err != nil { + return nil, err + } + inodeChanges, err := parseInodeChanges(out) + if err != nil { + return nil, err + } + return inodeChanges, nil +} diff --git a/vendor/github.com/mistifyio/go-zfs/zpool.go b/vendor/github.com/mistifyio/go-zfs/zpool.go new file mode 100644 index 0000000..d8db945 --- /dev/null +++ b/vendor/github.com/mistifyio/go-zfs/zpool.go @@ -0,0 +1,112 @@ +package zfs + +// ZFS zpool states, which can indicate if a pool is online, offline, +// degraded, etc. More information regarding zpool states can be found here: +// https://docs.oracle.com/cd/E19253-01/819-5461/gamno/index.html. +const ( + ZpoolOnline = "ONLINE" + ZpoolDegraded = "DEGRADED" + ZpoolFaulted = "FAULTED" + ZpoolOffline = "OFFLINE" + ZpoolUnavail = "UNAVAIL" + ZpoolRemoved = "REMOVED" +) + +// Zpool is a ZFS zpool. A pool is a top-level structure in ZFS, and can +// contain many descendent datasets. +type Zpool struct { + Name string + Health string + Allocated uint64 + Size uint64 + Free uint64 + Fragmentation uint64 + ReadOnly bool + Freeing uint64 + Leaked uint64 + DedupRatio float64 +} + +// zpool is a helper function to wrap typical calls to zpool. +func zpool(arg ...string) ([][]string, error) { + c := command{Command: "zpool"} + return c.Run(arg...) +} + +// GetZpool retrieves a single ZFS zpool by name. +func GetZpool(name string) (*Zpool, error) { + args := zpoolArgs + args = append(args, name) + out, err := zpool(args...) + if err != nil { + return nil, err + } + + // there is no -H + out = out[1:] + + z := &Zpool{Name: name} + for _, line := range out { + if err := z.parseLine(line); err != nil { + return nil, err + } + } + + return z, nil +} + +// Datasets returns a slice of all ZFS datasets in a zpool. +func (z *Zpool) Datasets() ([]*Dataset, error) { + return Datasets(z.Name) +} + +// Snapshots returns a slice of all ZFS snapshots in a zpool. 
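+//
+// Illustrative usage (added for this document, not part of the vendored
+// go-zfs source; the pool name is hypothetical):
+//
+//	pool, err := zfs.GetZpool("tank")
+//	if err == nil {
+//		snaps, _ := pool.Snapshots() // equivalent to zfs.Snapshots("tank")
+//		for _, s := range snaps {
+//			fmt.Println(s.Name)
+//		}
+//	}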
+func (z *Zpool) Snapshots() ([]*Dataset, error) { + return Snapshots(z.Name) +} + +// CreateZpool creates a new ZFS zpool with the specified name, properties, +// and optional arguments. +// A full list of available ZFS properties and command-line arguments may be +// found here: https://www.freebsd.org/cgi/man.cgi?zfs(8). +func CreateZpool(name string, properties map[string]string, args ...string) (*Zpool, error) { + cli := make([]string, 1, 4) + cli[0] = "create" + if properties != nil { + cli = append(cli, propsSlice(properties)...) + } + cli = append(cli, name) + cli = append(cli, args...) + _, err := zpool(cli...) + if err != nil { + return nil, err + } + + return &Zpool{Name: name}, nil +} + +// Destroy destroys a ZFS zpool by name. +func (z *Zpool) Destroy() error { + _, err := zpool("destroy", z.Name) + return err +} + +// ListZpools list all ZFS zpools accessible on the current system. +func ListZpools() ([]*Zpool, error) { + args := []string{"list", "-Ho", "name"} + out, err := zpool(args...) + if err != nil { + return nil, err + } + + var pools []*Zpool + + for _, line := range out { + z, err := GetZpool(line[0]) + if err != nil { + return nil, err + } + pools = append(pools, z) + } + return pools, nil +} diff --git a/vendor/github.com/opencontainers/go-digest/LICENSE b/vendor/github.com/opencontainers/go-digest/LICENSE new file mode 100644 index 0000000..0ea3ff8 --- /dev/null +++ b/vendor/github.com/opencontainers/go-digest/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2016 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/opencontainers/go-digest/LICENSE.docs b/vendor/github.com/opencontainers/go-digest/LICENSE.docs new file mode 100644 index 0000000..e26cd4f --- /dev/null +++ b/vendor/github.com/opencontainers/go-digest/LICENSE.docs @@ -0,0 +1,425 @@ +Attribution-ShareAlike 4.0 International + +======================================================================= + +Creative Commons Corporation ("Creative Commons") is not a law firm and +does not provide legal services or legal advice. Distribution of +Creative Commons public licenses does not create a lawyer-client or +other relationship. Creative Commons makes its licenses and related +information available on an "as-is" basis. Creative Commons gives no +warranties regarding its licenses, any material licensed under their +terms and conditions, or any related information. Creative Commons +disclaims all liability for damages resulting from their use to the +fullest extent possible. + +Using Creative Commons Public Licenses + +Creative Commons public licenses provide a standard set of terms and +conditions that creators and other rights holders may use to share +original works of authorship and other material subject to copyright +and certain other rights specified in the public license below. The +following considerations are for informational purposes only, are not +exhaustive, and do not form part of our licenses. 
+ + Considerations for licensors: Our public licenses are + intended for use by those authorized to give the public + permission to use material in ways otherwise restricted by + copyright and certain other rights. Our licenses are + irrevocable. Licensors should read and understand the terms + and conditions of the license they choose before applying it. + Licensors should also secure all rights necessary before + applying our licenses so that the public can reuse the + material as expected. Licensors should clearly mark any + material not subject to the license. This includes other CC- + licensed material, or material used under an exception or + limitation to copyright. More considerations for licensors: + wiki.creativecommons.org/Considerations_for_licensors + + Considerations for the public: By using one of our public + licenses, a licensor grants the public permission to use the + licensed material under specified terms and conditions. If + the licensor's permission is not necessary for any reason--for + example, because of any applicable exception or limitation to + copyright--then that use is not regulated by the license. Our + licenses grant only permissions under copyright and certain + other rights that a licensor has authority to grant. Use of + the licensed material may still be restricted for other + reasons, including because others have copyright or other + rights in the material. A licensor may make special requests, + such as asking that all changes be marked or described. + Although not required by our licenses, you are encouraged to + respect those requests where reasonable. More_considerations + for the public: + wiki.creativecommons.org/Considerations_for_licensees + +======================================================================= + +Creative Commons Attribution-ShareAlike 4.0 International Public +License + +By exercising the Licensed Rights (defined below), You accept and agree +to be bound by the terms and conditions of this Creative Commons +Attribution-ShareAlike 4.0 International Public License ("Public +License"). To the extent this Public License may be interpreted as a +contract, You are granted the Licensed Rights in consideration of Your +acceptance of these terms and conditions, and the Licensor grants You +such rights in consideration of benefits the Licensor receives from +making the Licensed Material available under these terms and +conditions. + + +Section 1 -- Definitions. + + a. Adapted Material means material subject to Copyright and Similar + Rights that is derived from or based upon the Licensed Material + and in which the Licensed Material is translated, altered, + arranged, transformed, or otherwise modified in a manner requiring + permission under the Copyright and Similar Rights held by the + Licensor. For purposes of this Public License, where the Licensed + Material is a musical work, performance, or sound recording, + Adapted Material is always produced where the Licensed Material is + synched in timed relation with a moving image. + + b. Adapter's License means the license You apply to Your Copyright + and Similar Rights in Your contributions to Adapted Material in + accordance with the terms and conditions of this Public License. + + c. BY-SA Compatible License means a license listed at + creativecommons.org/compatiblelicenses, approved by Creative + Commons as essentially the equivalent of this Public License. + + d. 
Copyright and Similar Rights means copyright and/or similar rights + closely related to copyright including, without limitation, + performance, broadcast, sound recording, and Sui Generis Database + Rights, without regard to how the rights are labeled or + categorized. For purposes of this Public License, the rights + specified in Section 2(b)(1)-(2) are not Copyright and Similar + Rights. + + e. Effective Technological Measures means those measures that, in the + absence of proper authority, may not be circumvented under laws + fulfilling obligations under Article 11 of the WIPO Copyright + Treaty adopted on December 20, 1996, and/or similar international + agreements. + + f. Exceptions and Limitations means fair use, fair dealing, and/or + any other exception or limitation to Copyright and Similar Rights + that applies to Your use of the Licensed Material. + + g. License Elements means the license attributes listed in the name + of a Creative Commons Public License. The License Elements of this + Public License are Attribution and ShareAlike. + + h. Licensed Material means the artistic or literary work, database, + or other material to which the Licensor applied this Public + License. + + i. Licensed Rights means the rights granted to You subject to the + terms and conditions of this Public License, which are limited to + all Copyright and Similar Rights that apply to Your use of the + Licensed Material and that the Licensor has authority to license. + + j. Licensor means the individual(s) or entity(ies) granting rights + under this Public License. + + k. Share means to provide material to the public by any means or + process that requires permission under the Licensed Rights, such + as reproduction, public display, public performance, distribution, + dissemination, communication, or importation, and to make material + available to the public including in ways that members of the + public may access the material from a place and at a time + individually chosen by them. + + l. Sui Generis Database Rights means rights other than copyright + resulting from Directive 96/9/EC of the European Parliament and of + the Council of 11 March 1996 on the legal protection of databases, + as amended and/or succeeded, as well as other essentially + equivalent rights anywhere in the world. + + m. You means the individual or entity exercising the Licensed Rights + under this Public License. Your has a corresponding meaning. + + +Section 2 -- Scope. + + a. License grant. + + 1. Subject to the terms and conditions of this Public License, + the Licensor hereby grants You a worldwide, royalty-free, + non-sublicensable, non-exclusive, irrevocable license to + exercise the Licensed Rights in the Licensed Material to: + + a. reproduce and Share the Licensed Material, in whole or + in part; and + + b. produce, reproduce, and Share Adapted Material. + + 2. Exceptions and Limitations. For the avoidance of doubt, where + Exceptions and Limitations apply to Your use, this Public + License does not apply, and You do not need to comply with + its terms and conditions. + + 3. Term. The term of this Public License is specified in Section + 6(a). + + 4. Media and formats; technical modifications allowed. The + Licensor authorizes You to exercise the Licensed Rights in + all media and formats whether now known or hereafter created, + and to make technical modifications necessary to do so. 
The + Licensor waives and/or agrees not to assert any right or + authority to forbid You from making technical modifications + necessary to exercise the Licensed Rights, including + technical modifications necessary to circumvent Effective + Technological Measures. For purposes of this Public License, + simply making modifications authorized by this Section 2(a) + (4) never produces Adapted Material. + + 5. Downstream recipients. + + a. Offer from the Licensor -- Licensed Material. Every + recipient of the Licensed Material automatically + receives an offer from the Licensor to exercise the + Licensed Rights under the terms and conditions of this + Public License. + + b. Additional offer from the Licensor -- Adapted Material. + Every recipient of Adapted Material from You + automatically receives an offer from the Licensor to + exercise the Licensed Rights in the Adapted Material + under the conditions of the Adapter's License You apply. + + c. No downstream restrictions. You may not offer or impose + any additional or different terms or conditions on, or + apply any Effective Technological Measures to, the + Licensed Material if doing so restricts exercise of the + Licensed Rights by any recipient of the Licensed + Material. + + 6. No endorsement. Nothing in this Public License constitutes or + may be construed as permission to assert or imply that You + are, or that Your use of the Licensed Material is, connected + with, or sponsored, endorsed, or granted official status by, + the Licensor or others designated to receive attribution as + provided in Section 3(a)(1)(A)(i). + + b. Other rights. + + 1. Moral rights, such as the right of integrity, are not + licensed under this Public License, nor are publicity, + privacy, and/or other similar personality rights; however, to + the extent possible, the Licensor waives and/or agrees not to + assert any such rights held by the Licensor to the limited + extent necessary to allow You to exercise the Licensed + Rights, but not otherwise. + + 2. Patent and trademark rights are not licensed under this + Public License. + + 3. To the extent possible, the Licensor waives any right to + collect royalties from You for the exercise of the Licensed + Rights, whether directly or through a collecting society + under any voluntary or waivable statutory or compulsory + licensing scheme. In all other cases the Licensor expressly + reserves any right to collect such royalties. + + +Section 3 -- License Conditions. + +Your exercise of the Licensed Rights is expressly made subject to the +following conditions. + + a. Attribution. + + 1. If You Share the Licensed Material (including in modified + form), You must: + + a. retain the following if it is supplied by the Licensor + with the Licensed Material: + + i. identification of the creator(s) of the Licensed + Material and any others designated to receive + attribution, in any reasonable manner requested by + the Licensor (including by pseudonym if + designated); + + ii. a copyright notice; + + iii. a notice that refers to this Public License; + + iv. a notice that refers to the disclaimer of + warranties; + + v. a URI or hyperlink to the Licensed Material to the + extent reasonably practicable; + + b. indicate if You modified the Licensed Material and + retain an indication of any previous modifications; and + + c. indicate the Licensed Material is licensed under this + Public License, and include the text of, or the URI or + hyperlink to, this Public License. + + 2. 
You may satisfy the conditions in Section 3(a)(1) in any + reasonable manner based on the medium, means, and context in + which You Share the Licensed Material. For example, it may be + reasonable to satisfy the conditions by providing a URI or + hyperlink to a resource that includes the required + information. + + 3. If requested by the Licensor, You must remove any of the + information required by Section 3(a)(1)(A) to the extent + reasonably practicable. + + b. ShareAlike. + + In addition to the conditions in Section 3(a), if You Share + Adapted Material You produce, the following conditions also apply. + + 1. The Adapter's License You apply must be a Creative Commons + license with the same License Elements, this version or + later, or a BY-SA Compatible License. + + 2. You must include the text of, or the URI or hyperlink to, the + Adapter's License You apply. You may satisfy this condition + in any reasonable manner based on the medium, means, and + context in which You Share Adapted Material. + + 3. You may not offer or impose any additional or different terms + or conditions on, or apply any Effective Technological + Measures to, Adapted Material that restrict exercise of the + rights granted under the Adapter's License You apply. + + +Section 4 -- Sui Generis Database Rights. + +Where the Licensed Rights include Sui Generis Database Rights that +apply to Your use of the Licensed Material: + + a. for the avoidance of doubt, Section 2(a)(1) grants You the right + to extract, reuse, reproduce, and Share all or a substantial + portion of the contents of the database; + + b. if You include all or a substantial portion of the database + contents in a database in which You have Sui Generis Database + Rights, then the database in which You have Sui Generis Database + Rights (but not its individual contents) is Adapted Material, + + including for purposes of Section 3(b); and + c. You must comply with the conditions in Section 3(a) if You Share + all or a substantial portion of the contents of the database. + +For the avoidance of doubt, this Section 4 supplements and does not +replace Your obligations under this Public License where the Licensed +Rights include other Copyright and Similar Rights. + + +Section 5 -- Disclaimer of Warranties and Limitation of Liability. + + a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE + EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS + AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF + ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, + IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, + WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR + PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, + ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT + KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT + ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. + + b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE + TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, + NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, + INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, + COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR + USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN + ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR + DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR + IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. + + c. 
The disclaimer of warranties and limitation of liability provided + above shall be interpreted in a manner that, to the extent + possible, most closely approximates an absolute disclaimer and + waiver of all liability. + + +Section 6 -- Term and Termination. + + a. This Public License applies for the term of the Copyright and + Similar Rights licensed here. However, if You fail to comply with + this Public License, then Your rights under this Public License + terminate automatically. + + b. Where Your right to use the Licensed Material has terminated under + Section 6(a), it reinstates: + + 1. automatically as of the date the violation is cured, provided + it is cured within 30 days of Your discovery of the + violation; or + + 2. upon express reinstatement by the Licensor. + + For the avoidance of doubt, this Section 6(b) does not affect any + right the Licensor may have to seek remedies for Your violations + of this Public License. + + c. For the avoidance of doubt, the Licensor may also offer the + Licensed Material under separate terms or conditions or stop + distributing the Licensed Material at any time; however, doing so + will not terminate this Public License. + + d. Sections 1, 5, 6, 7, and 8 survive termination of this Public + License. + + +Section 7 -- Other Terms and Conditions. + + a. The Licensor shall not be bound by any additional or different + terms or conditions communicated by You unless expressly agreed. + + b. Any arrangements, understandings, or agreements regarding the + Licensed Material not stated herein are separate from and + independent of the terms and conditions of this Public License. + + +Section 8 -- Interpretation. + + a. For the avoidance of doubt, this Public License does not, and + shall not be interpreted to, reduce, limit, restrict, or impose + conditions on any use of the Licensed Material that could lawfully + be made without permission under this Public License. + + b. To the extent possible, if any provision of this Public License is + deemed unenforceable, it shall be automatically reformed to the + minimum extent necessary to make it enforceable. If the provision + cannot be reformed, it shall be severed from this Public License + without affecting the enforceability of the remaining terms and + conditions. + + c. No term or condition of this Public License will be waived and no + failure to comply consented to unless expressly agreed to by the + Licensor. + + d. Nothing in this Public License constitutes or may be interpreted + as a limitation upon, or waiver of, any privileges and immunities + that apply to the Licensor or You, including from the legal + processes of any jurisdiction or authority. + + +======================================================================= + +Creative Commons is not a party to its public licenses. +Notwithstanding, Creative Commons may elect to apply one of its public +licenses to material it publishes and in those instances will be +considered the "Licensor." 
Except for the limited purpose of indicating +that material is shared under a Creative Commons public license or as +otherwise permitted by the Creative Commons policies published at +creativecommons.org/policies, Creative Commons does not authorize the +use of the trademark "Creative Commons" or any other trademark or logo +of Creative Commons without its prior written consent including, +without limitation, in connection with any unauthorized modifications +to any of its public licenses or any other arrangements, +understandings, or agreements concerning use of licensed material. For +the avoidance of doubt, this paragraph does not form part of the public +licenses. + +Creative Commons may be contacted at creativecommons.org. diff --git a/vendor/github.com/opencontainers/go-digest/README.md b/vendor/github.com/opencontainers/go-digest/README.md new file mode 100644 index 0000000..25aac34 --- /dev/null +++ b/vendor/github.com/opencontainers/go-digest/README.md @@ -0,0 +1,104 @@ +# go-digest + +[![GoDoc](https://godoc.org/github.com/opencontainers/go-digest?status.svg)](https://godoc.org/github.com/opencontainers/go-digest) [![Go Report Card](https://goreportcard.com/badge/github.com/opencontainers/go-digest)](https://goreportcard.com/report/github.com/opencontainers/go-digest) [![Build Status](https://travis-ci.org/opencontainers/go-digest.svg?branch=master)](https://travis-ci.org/opencontainers/go-digest) + +Common digest package used across the container ecosystem. + +Please see the [godoc](https://godoc.org/github.com/opencontainers/go-digest) for more information. + +# What is a digest? + +A digest is just a hash. + +The most common use case for a digest is to create a content +identifier for use in [Content Addressable Storage](https://en.wikipedia.org/wiki/Content-addressable_storage) +systems: + +```go +id := digest.FromBytes([]byte("my content")) +``` + +In the example above, the id can be used to uniquely identify +the byte slice "my content". This allows two disparate applications +to agree on a verifiable identifier without having to trust one +another. + +An identifying digest can be verified, as follows: + +```go +if id != digest.FromBytes([]byte("my content")) { + return errors.New("the content has changed!") +} +``` + +A `Verifier` type can be used to handle cases where an `io.Reader` +makes more sense: + +```go +rd := getContent() +verifier := id.Verifier() +io.Copy(verifier, rd) + +if !verifier.Verified() { + return errors.New("the content has changed!") +} +``` + +Using [Merkle DAGs](https://en.wikipedia.org/wiki/Merkle_tree), this +can power a rich, safe, content distribution system. + +# Usage + +While the [godoc](https://godoc.org/github.com/opencontainers/go-digest) is +considered the best resource, a few important items need to be called +out when using this package. + +1. Make sure to import the hash implementations into your application + or the package will panic. You should have something like the + following in the main (or other entrypoint) of your application: + + ```go + import ( + _ "crypto/sha256" + _ "crypto/sha512" + ) + ``` + This may seem inconvenient but it allows you replace the hash + implementations with others, such as https://github.com/stevvooe/resumable. + +2. Even though `digest.Digest` may be assemable as a string, _always_ + verify your input with `digest.Parse` or use `Digest.Validate` + when accepting untrusted input. 
While there are measures to + avoid common problems, this will ensure you have valid digests + in the rest of your application. + +# Stability + +The Go API, at this stage, is considered stable, unless otherwise noted. + +As always, before using a package export, read the [godoc](https://godoc.org/github.com/opencontainers/go-digest). + +# Contributing + +This package is considered fairly complete. It has been in production +in thousands (millions?) of deployments and is fairly battle-hardened. +New additions will be met with skepticism. If you think there is a +missing feature, please file a bug clearly describing the problem and +the alternatives you tried before submitting a PR. + +# Reporting security issues + +Please DO NOT file a public issue, instead send your report privately to +security@opencontainers.org. + +The maintainers take security seriously. If you discover a security issue, +please bring it to their attention right away! + +If you are reporting a security issue, do not create an issue or file a pull +request on GitHub. Instead, disclose the issue responsibly by sending an email +to security@opencontainers.org (which is inhabited only by the maintainers of +the various OCI projects). + +# Copyright and license + +Copyright © 2016 Docker, Inc. All rights reserved, except as follows. Code is released under the [Apache 2.0 license](LICENSE). This `README.md` file and the [`CONTRIBUTING.md`](CONTRIBUTING.md) file are licensed under the Creative Commons Attribution 4.0 International License under the terms and conditions set forth in the file [`LICENSE.docs`](LICENSE.docs). You may obtain a duplicate copy of the same license, titled CC BY-SA 4.0, at http://creativecommons.org/licenses/by-sa/4.0/. diff --git a/vendor/github.com/docker/distribution/digest/digester.go b/vendor/github.com/opencontainers/go-digest/algorithm.go similarity index 56% rename from vendor/github.com/docker/distribution/digest/digester.go rename to vendor/github.com/opencontainers/go-digest/algorithm.go index f3105a4..8813bd2 100644 --- a/vendor/github.com/docker/distribution/digest/digester.go +++ b/vendor/github.com/opencontainers/go-digest/algorithm.go @@ -1,3 +1,17 @@ +// Copyright 2017 Docker, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package digest import ( @@ -5,6 +19,7 @@ import ( "fmt" "hash" "io" + "regexp" ) // Algorithm identifies and implementation of a digester by an identifier. @@ -14,9 +29,9 @@ type Algorithm string // supported digest types const ( - SHA256 Algorithm = "sha256" // sha256 with hex encoding - SHA384 Algorithm = "sha384" // sha384 with hex encoding - SHA512 Algorithm = "sha512" // sha512 with hex encoding + SHA256 Algorithm = "sha256" // sha256 with hex encoding (lower case only) + SHA384 Algorithm = "sha384" // sha384 with hex encoding (lower case only) + SHA512 Algorithm = "sha512" // sha512 with hex encoding (lower case only) // Canonical is the primary digest algorithm used with the distribution // project. 
Other digests may be used but this one is the primary storage @@ -36,10 +51,18 @@ var ( SHA384: crypto.SHA384, SHA512: crypto.SHA512, } + + // anchoredEncodedRegexps contains anchored regular expressions for hex-encoded digests. + // Note that /A-F/ disallowed. + anchoredEncodedRegexps = map[Algorithm]*regexp.Regexp{ + SHA256: regexp.MustCompile(`^[a-f0-9]{64}$`), + SHA384: regexp.MustCompile(`^[a-f0-9]{96}$`), + SHA512: regexp.MustCompile(`^[a-f0-9]{128}$`), + } ) // Available returns true if the digest type is available for use. If this -// returns false, New and Hash will return nil. +// returns false, Digester and Hash will return nil. func (a Algorithm) Available() bool { h, ok := algorithms[a] if !ok { @@ -72,13 +95,17 @@ func (a *Algorithm) Set(value string) error { *a = Algorithm(value) } + if !a.Available() { + return ErrDigestUnsupported + } + return nil } -// New returns a new digester for the specified algorithm. If the algorithm +// Digester returns a new digester for the specified algorithm. If the algorithm // does not have a digester implementation, nil will be returned. This can be -// checked by calling Available before calling New. -func (a Algorithm) New() Digester { +// checked by calling Available before calling Digester. +func (a Algorithm) Digester() Digester { return &digester{ alg: a, hash: a.Hash(), @@ -89,6 +116,11 @@ func (a Algorithm) New() Digester { // method will panic. Check Algorithm.Available() before calling. func (a Algorithm) Hash() hash.Hash { if !a.Available() { + // Empty algorithm string is invalid + if a == "" { + panic(fmt.Sprintf("empty digest algorithm, validate before calling Algorithm.Hash()")) + } + // NOTE(stevvooe): A missing hash is usually a programming error that // must be resolved at compile time. We don't import in the digest // package to allow users to choose their hash implementation (such as @@ -102,9 +134,17 @@ func (a Algorithm) Hash() hash.Hash { return algorithms[a].New() } +// Encode encodes the raw bytes of a digest, typically from a hash.Hash, into +// the encoded portion of the digest. +func (a Algorithm) Encode(d []byte) string { + // TODO(stevvooe): Currently, all algorithms use a hex encoding. When we + // add support for back registration, we can modify this accordingly. + return fmt.Sprintf("%x", d) +} + // FromReader returns the digest of the reader using the algorithm. func (a Algorithm) FromReader(rd io.Reader) (Digest, error) { - digester := a.New() + digester := a.Digester() if _, err := io.Copy(digester.Hash(), rd); err != nil { return "", err @@ -115,7 +155,7 @@ func (a Algorithm) FromReader(rd io.Reader) (Digest, error) { // FromBytes digests the input and returns a Digest. func (a Algorithm) FromBytes(p []byte) Digest { - digester := a.New() + digester := a.Digester() if _, err := digester.Hash().Write(p); err != nil { // Writes to a Hash should never fail. None of the existing @@ -129,27 +169,24 @@ func (a Algorithm) FromBytes(p []byte) Digest { return digester.Digest() } -// TODO(stevvooe): Allow resolution of verifiers using the digest type and -// this registration system. - -// Digester calculates the digest of written data. Writes should go directly -// to the return value of Hash, while calling Digest will return the current -// value of the digest. -type Digester interface { - Hash() hash.Hash // provides direct access to underlying hash instance. - Digest() Digest -} - -// digester provides a simple digester definition that embeds a hasher. 
-type digester struct { - alg Algorithm - hash hash.Hash -} - -func (d *digester) Hash() hash.Hash { - return d.hash +// FromString digests the string input and returns a Digest. +func (a Algorithm) FromString(s string) Digest { + return a.FromBytes([]byte(s)) } -func (d *digester) Digest() Digest { - return NewDigest(d.alg, d.hash) +// Validate validates the encoded portion string +func (a Algorithm) Validate(encoded string) error { + r, ok := anchoredEncodedRegexps[a] + if !ok { + return ErrDigestUnsupported + } + // Digests much always be hex-encoded, ensuring that their hex portion will + // always be size*2 + if a.Size()*2 != len(encoded) { + return ErrDigestInvalidLength + } + if r.MatchString(encoded) { + return nil + } + return ErrDigestInvalidFormat } diff --git a/vendor/github.com/docker/distribution/digest/digest.go b/vendor/github.com/opencontainers/go-digest/digest.go similarity index 56% rename from vendor/github.com/docker/distribution/digest/digest.go rename to vendor/github.com/opencontainers/go-digest/digest.go index 31d821b..ad398cb 100644 --- a/vendor/github.com/docker/distribution/digest/digest.go +++ b/vendor/github.com/opencontainers/go-digest/digest.go @@ -1,3 +1,17 @@ +// Copyright 2017 Docker, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package digest import ( @@ -8,11 +22,6 @@ import ( "strings" ) -const ( - // DigestSha256EmptyTar is the canonical sha256 digest of empty data - DigestSha256EmptyTar = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" -) - // Digest allows simple protection of hex formatted digest strings, prefixed // by their algorithm. Strings of type Digest have some guarantee of being in // the correct format and it provides quick access to the components of a @@ -36,16 +45,21 @@ func NewDigest(alg Algorithm, h hash.Hash) Digest { // functions. This is also useful for rebuilding digests from binary // serializations. func NewDigestFromBytes(alg Algorithm, p []byte) Digest { - return Digest(fmt.Sprintf("%s:%x", alg, p)) + return NewDigestFromEncoded(alg, alg.Encode(p)) } -// NewDigestFromHex returns a Digest from alg and a the hex encoded digest. +// NewDigestFromHex is deprecated. Please use NewDigestFromEncoded. func NewDigestFromHex(alg, hex string) Digest { - return Digest(fmt.Sprintf("%s:%s", alg, hex)) + return NewDigestFromEncoded(Algorithm(alg), hex) +} + +// NewDigestFromEncoded returns a Digest from alg and the encoded digest. +func NewDigestFromEncoded(alg Algorithm, encoded string) Digest { + return Digest(fmt.Sprintf("%s:%s", alg, encoded)) } // DigestRegexp matches valid digest types. -var DigestRegexp = regexp.MustCompile(`[a-zA-Z0-9-_+.]+:[a-fA-F0-9]+`) +var DigestRegexp = regexp.MustCompile(`[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+`) // DigestRegexpAnchored matches valid digest types, anchored to the start and end of the match. 
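// For example (an illustrative note added for this document): "sha256:"
// followed by 64 lower-case hex characters is matched, while an upper-case
// algorithm such as "SHA256:..." is not, since DigestRegexp restricts the
// algorithm portion to lower-case characters.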
var DigestRegexpAnchored = regexp.MustCompile(`^` + DigestRegexp.String() + `$`) @@ -61,16 +75,14 @@ var ( ErrDigestUnsupported = fmt.Errorf("unsupported digest algorithm") ) -// ParseDigest parses s and returns the validated digest object. An error will +// Parse parses s and returns the validated digest object. An error will // be returned if the format is invalid. -func ParseDigest(s string) (Digest, error) { +func Parse(s string) (Digest, error) { d := Digest(s) - return d, d.Validate() } -// FromReader returns the most valid digest for the underlying content using -// the canonical digest algorithm. +// FromReader consumes the content of rd until io.EOF, returning canonical digest. func FromReader(rd io.Reader) (Digest, error) { return Canonical.FromReader(rd) } @@ -80,36 +92,27 @@ func FromBytes(p []byte) Digest { return Canonical.FromBytes(p) } +// FromString digests the input and returns a Digest. +func FromString(s string) Digest { + return Canonical.FromString(s) +} + // Validate checks that the contents of d is a valid digest, returning an // error if not. func (d Digest) Validate() error { s := string(d) - - if !DigestRegexpAnchored.MatchString(s) { - return ErrDigestInvalidFormat - } - i := strings.Index(s, ":") - if i < 0 { + if i <= 0 || i+1 == len(s) { return ErrDigestInvalidFormat } - - // case: "sha256:" with no hex. - if i+1 == len(s) { - return ErrDigestInvalidFormat - } - - switch algorithm := Algorithm(s[:i]); algorithm { - case SHA256, SHA384, SHA512: - if algorithm.Size()*2 != len(s[i+1:]) { - return ErrDigestInvalidLength + algorithm, encoded := Algorithm(s[:i]), s[i+1:] + if !algorithm.Available() { + if !DigestRegexpAnchored.MatchString(s) { + return ErrDigestInvalidFormat } - break - default: return ErrDigestUnsupported } - - return nil + return algorithm.Validate(encoded) } // Algorithm returns the algorithm portion of the digest. This will panic if @@ -118,12 +121,26 @@ func (d Digest) Algorithm() Algorithm { return Algorithm(d[:d.sepIndex()]) } -// Hex returns the hex digest portion of the digest. This will panic if the +// Verifier returns a writer object that can be used to verify a stream of +// content against the digest. If the digest is invalid, the method will panic. +func (d Digest) Verifier() Verifier { + return hashVerifier{ + hash: d.Algorithm().Hash(), + digest: d, + } +} + +// Encoded returns the encoded portion of the digest. This will panic if the // underlying digest is not in a valid format. -func (d Digest) Hex() string { +func (d Digest) Encoded() string { return string(d[d.sepIndex()+1:]) } +// Hex is deprecated. Please use Digest.Encoded. +func (d Digest) Hex() string { + return d.Encoded() +} + func (d Digest) String() string { return string(d) } @@ -132,7 +149,7 @@ func (d Digest) sepIndex() int { i := strings.Index(string(d), ":") if i < 0 { - panic("could not find ':' in digest: " + d) + panic(fmt.Sprintf("no ':' separator in digest %q", d)) } return i diff --git a/vendor/github.com/opencontainers/go-digest/digester.go b/vendor/github.com/opencontainers/go-digest/digester.go new file mode 100644 index 0000000..36fa272 --- /dev/null +++ b/vendor/github.com/opencontainers/go-digest/digester.go @@ -0,0 +1,39 @@ +// Copyright 2017 Docker, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package digest + +import "hash" + +// Digester calculates the digest of written data. Writes should go directly +// to the return value of Hash, while calling Digest will return the current +// value of the digest. +type Digester interface { + Hash() hash.Hash // provides direct access to underlying hash instance. + Digest() Digest +} + +// digester provides a simple digester definition that embeds a hasher. +type digester struct { + alg Algorithm + hash hash.Hash +} + +func (d *digester) Hash() hash.Hash { + return d.hash +} + +func (d *digester) Digest() Digest { + return NewDigest(d.alg, d.hash) +} diff --git a/vendor/github.com/docker/distribution/digest/doc.go b/vendor/github.com/opencontainers/go-digest/doc.go similarity index 73% rename from vendor/github.com/docker/distribution/digest/doc.go rename to vendor/github.com/opencontainers/go-digest/doc.go index f64b0db..491ea1e 100644 --- a/vendor/github.com/docker/distribution/digest/doc.go +++ b/vendor/github.com/opencontainers/go-digest/doc.go @@ -1,3 +1,17 @@ +// Copyright 2017 Docker, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + // Package digest provides a generalized type to opaquely represent message // digests and their operations within the registry. The Digest type is // designed to serve as a flexible identifier in a content-addressable system. diff --git a/vendor/github.com/docker/distribution/digest/verifiers.go b/vendor/github.com/opencontainers/go-digest/verifiers.go similarity index 54% rename from vendor/github.com/docker/distribution/digest/verifiers.go rename to vendor/github.com/opencontainers/go-digest/verifiers.go index 9af3be1..32125e9 100644 --- a/vendor/github.com/docker/distribution/digest/verifiers.go +++ b/vendor/github.com/opencontainers/go-digest/verifiers.go @@ -1,3 +1,17 @@ +// Copyright 2017 Docker, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package digest import ( @@ -17,19 +31,6 @@ type Verifier interface { Verified() bool } -// NewDigestVerifier returns a verifier that compares the written bytes -// against a passed in digest. 
-func NewDigestVerifier(d Digest) (Verifier, error) { - if err := d.Validate(); err != nil { - return nil, err - } - - return hashVerifier{ - hash: d.Algorithm().Hash(), - digest: d, - }, nil -} - type hashVerifier struct { digest Digest hash hash.Hash diff --git a/vendor/github.com/opencontainers/image-spec/LICENSE b/vendor/github.com/opencontainers/image-spec/LICENSE new file mode 100644 index 0000000..9fdc20f --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2016 The Linux Foundation. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/opencontainers/image-spec/README.md b/vendor/github.com/opencontainers/image-spec/README.md new file mode 100644 index 0000000..31e8278 --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/README.md @@ -0,0 +1,167 @@ +# OCI Image Format Specification +
+ +The OCI Image Format project creates and maintains the software shipping container image format spec (OCI Image Format). + +**[The specification can be found here](spec.md).** + +This repository also provides [Go types](specs-go), [intra-blob validation tooling, and JSON Schema](schema). +The Go types and validation should be compatible with the current Go release; earlier Go releases are not supported. + +Additional documentation about how this group operates: + +- [Code of Conduct](https://github.com/opencontainers/tob/blob/d2f9d68c1332870e40693fe077d311e0742bc73d/code-of-conduct.md) +- [Roadmap](#roadmap) +- [Releases](RELEASES.md) +- [Project Documentation](project.md) + +The _optional_ and _base_ layers of all OCI projects are tracked in the [OCI Scope Table](https://www.opencontainers.org/about/oci-scope-table). + +## Running an OCI Image + +The OCI Image Format partner project is the [OCI Runtime Spec project](https://github.com/opencontainers/runtime-spec). +The Runtime Specification outlines how to run a "[filesystem bundle](https://github.com/opencontainers/runtime-spec/blob/master/bundle.md)" that is unpacked on disk. +At a high-level an OCI implementation would download an OCI Image then unpack that image into an OCI Runtime filesystem bundle. +At this point the OCI Runtime Bundle would be run by an OCI Runtime. + +This entire workflow supports the UX that users have come to expect from container engines like Docker and rkt: primarily, the ability to run an image with no additional arguments: + +* docker run example.com/org/app:v1.0.0 +* rkt run example.com/org/app,version=v1.0.0 + +To support this UX the OCI Image Format contains sufficient information to launch the application on the target platform (e.g. command, arguments, environment variables, etc). + +## FAQ + +**Q: Why doesn't this project mention distribution?** + +A: Distribution, for example using HTTP as both Docker v2.2 and AppC do today, is currently out of scope on the [OCI Scope Table](https://www.opencontainers.org/about/oci-scope-table). +There has been [some discussion on the TOB mailing list](https://groups.google.com/a/opencontainers.org/d/msg/tob/A3JnmI-D-6Y/tLuptPDHAgAJ) to make distribution an optional layer, but this topic is a work in progress. + +**Q: What happens to AppC or Docker Image Formats?** + +A: Existing formats can continue to be a proving ground for technologies, as needed. +The OCI Image Format project strives to provide a dependable open specification that can be shared between different tools and be evolved for years or decades of compatibility; as the deb and rpm format have. + +Find more [FAQ on the OCI site](https://www.opencontainers.org/faq). + +## Roadmap + +The [GitHub milestones](https://github.com/opencontainers/image-spec/milestones) lay out the path to the future improvements. + +# Contributing + +Development happens on GitHub for the spec. +Issues are used for bugs and actionable items and longer discussions can happen on the [mailing list](#mailing-list). + +The specification and code is licensed under the Apache 2.0 license found in the `LICENSE` file of this repository. + +## Discuss your design + +The project welcomes submissions, but please let everyone know what you are working on. + +Before undertaking a nontrivial change to this specification, send mail to the [mailing list](#mailing-list) to discuss what you plan to do. +This gives everyone a chance to validate the design, helps prevent duplication of effort, and ensures that the idea fits. 
+It also guarantees that the design is sound before code is written; a GitHub pull-request is not the place for high-level discussions. + +Typos and grammatical errors can go straight to a pull-request. +When in doubt, start on the [mailing-list](#mailing-list). + +## Weekly Call + +The contributors and maintainers of all OCI projects have a weekly meeting Wednesdays at 2:00 PM (USA Pacific). +Everyone is welcome to participate via [UberConference web][UberConference] or audio-only: +1-415-968-0849 (no PIN needed). +An initial agenda will be posted to the [mailing list](#mailing-list) earlier in the week, and everyone is welcome to propose additional topics or suggest other agenda alterations there. +Minutes are posted to the [mailing list](#mailing-list) and minutes from past calls are archived [here][minutes]. + +## Mailing List + +You can subscribe and join the mailing list on [Google Groups](https://groups.google.com/a/opencontainers.org/forum/#!forum/dev). + +## IRC + +OCI discussion happens on #opencontainers on Freenode ([logs][irc-logs]). + +## Markdown style + +To keep consistency throughout the Markdown files in the Open Container spec all files should be formatted one sentence per line. +This fixes two things: it makes diffing easier with git and it resolves fights about line wrapping length. +For example, this paragraph will span three lines in the Markdown source. + +## Git commit + +### Sign your work + +The sign-off is a simple line at the end of the explanation for the patch, which certifies that you wrote it or otherwise have the right to pass it on as an open-source patch. +The rules are pretty simple: if you can certify the below (from [developercertificate.org](http://developercertificate.org/)): + +``` +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +660 York Street, Suite 102, +San Francisco, CA 94110 USA + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. +``` + +then you just add a line to every git commit message: + + Signed-off-by: Joe Smith <joe.smith@email.com> + +using your real name (sorry, no pseudonyms or anonymous contributions.) + +You can add the sign off when creating the git commit via `git commit -s`. + +### Commit Style + +Simple house-keeping for clean git history.
+Read more on [How to Write a Git Commit Message](http://chris.beams.io/posts/git-commit/) or the Discussion section of [`git-commit(1)`](http://git-scm.com/docs/git-commit). + +1. Separate the subject from body with a blank line +2. Limit the subject line to 50 characters +3. Capitalize the subject line +4. Do not end the subject line with a period +5. Use the imperative mood in the subject line +6. Wrap the body at 72 characters +7. Use the body to explain what and why vs. how + * If there was important/useful/essential conversation or information, copy or include a reference +8. When possible, one keyword to scope the change in the subject (i.e. "README: ...", "runtime: ...") + + +[UberConference]: https://www.uberconference.com/opencontainers +[irc-logs]: http://ircbot.wl.linuxfoundation.org/eavesdrop/%23opencontainers/ +[minutes]: http://ircbot.wl.linuxfoundation.org/meetings/opencontainers/ diff --git a/vendor/github.com/opencontainers/image-spec/schema/doc.go b/vendor/github.com/opencontainers/image-spec/schema/doc.go new file mode 100644 index 0000000..5ea5914 --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/schema/doc.go @@ -0,0 +1,16 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package schema defines the OCI image media types, schema definitions and validation functions. +package schema diff --git a/vendor/github.com/opencontainers/image-spec/schema/error.go b/vendor/github.com/opencontainers/image-spec/schema/error.go new file mode 100644 index 0000000..8b0bfc2 --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/schema/error.go @@ -0,0 +1,44 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package schema + +import ( + "encoding/json" + "io" + + "go4.org/errorutil" +) + +// A SyntaxError is a description of a JSON syntax error +// including line, column and offset in the JSON file. +type SyntaxError struct { + msg string + Line, Col int + Offset int64 +} + +func (e *SyntaxError) Error() string { return e.msg } + +// WrapSyntaxError checks whether the given error is a *json.SyntaxError +// and converts it into a *schema.SyntaxError containing line/col information using the given reader. +// If the given error is not a *json.SyntaxError it is returned unchanged. 
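The function body follows below; in the meantime, a hedged sketch of how WrapSyntaxError is intended to be called: the caller re-supplies the same document so the line and column can be recomputed from the byte offset. The truncated JSON input is fabricated:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"

	"github.com/opencontainers/image-spec/schema"
)

func main() {
	raw := []byte(`{"schemaVersion": 2,`) // deliberately truncated JSON
	var doc interface{}
	if err := json.Unmarshal(raw, &doc); err != nil {
		// Re-read the document to attach line/column/offset information.
		err = schema.WrapSyntaxError(bytes.NewReader(raw), err)
		if serr, ok := err.(*schema.SyntaxError); ok {
			fmt.Printf("line %d, col %d (offset %d): %v\n", serr.Line, serr.Col, serr.Offset, serr)
		}
	}
}
```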
+func WrapSyntaxError(r io.Reader, err error) error { + if serr, ok := err.(*json.SyntaxError); ok { + line, col, _ := errorutil.HighlightBytePosition(r, serr.Offset) + return &SyntaxError{serr.Error(), line, col, serr.Offset} + } + + return err +} diff --git a/vendor/github.com/opencontainers/image-spec/schema/fs.go b/vendor/github.com/opencontainers/image-spec/schema/fs.go new file mode 100644 index 0000000..5baeeae --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/schema/fs.go @@ -0,0 +1,323 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by "esc -private -pkg=schema -include=.*\.json$ ."; DO NOT EDIT. + +package schema + +import ( + "bytes" + "compress/gzip" + "encoding/base64" + "io/ioutil" + "net/http" + "os" + "path" + "sync" + "time" +) + +type _escLocalFS struct{} + +var _escLocal _escLocalFS + +type _escStaticFS struct{} + +var _escStatic _escStaticFS + +type _escDirectory struct { + fs http.FileSystem + name string +} + +type _escFile struct { + compressed string + size int64 + modtime int64 + local string + isDir bool + + once sync.Once + data []byte + name string +} + +func (_escLocalFS) Open(name string) (http.File, error) { + f, present := _escData[path.Clean(name)] + if !present { + return nil, os.ErrNotExist + } + return os.Open(f.local) +} + +func (_escStaticFS) prepare(name string) (*_escFile, error) { + f, present := _escData[path.Clean(name)] + if !present { + return nil, os.ErrNotExist + } + var err error + f.once.Do(func() { + f.name = path.Base(name) + if f.size == 0 { + return + } + var gr *gzip.Reader + b64 := base64.NewDecoder(base64.StdEncoding, bytes.NewBufferString(f.compressed)) + gr, err = gzip.NewReader(b64) + if err != nil { + return + } + f.data, err = ioutil.ReadAll(gr) + }) + if err != nil { + return nil, err + } + return f, nil +} + +func (fs _escStaticFS) Open(name string) (http.File, error) { + f, err := fs.prepare(name) + if err != nil { + return nil, err + } + return f.File() +} + +func (dir _escDirectory) Open(name string) (http.File, error) { + return dir.fs.Open(dir.name + name) +} + +func (f *_escFile) File() (http.File, error) { + type httpFile struct { + *bytes.Reader + *_escFile + } + return &httpFile{ + Reader: bytes.NewReader(f.data), + _escFile: f, + }, nil +} + +func (f *_escFile) Close() error { + return nil +} + +func (f *_escFile) Readdir(count int) ([]os.FileInfo, error) { + return nil, nil +} + +func (f *_escFile) Stat() (os.FileInfo, error) { + return f, nil +} + +func (f *_escFile) Name() string { + return f.name +} + +func (f *_escFile) Size() int64 { + return f.size +} + +func (f *_escFile) Mode() os.FileMode { + return 0 +} + +func (f *_escFile) ModTime() time.Time { + return time.Unix(f.modtime, 0) +} + +func (f *_escFile) IsDir() bool { + return f.isDir +} + +func (f *_escFile) Sys() interface{} { + return f +} + +// _escFS returns a http.Filesystem for the embedded assets. If useLocal is true, +// the filesystem's contents are instead used. 
+func _escFS(useLocal bool) http.FileSystem { + if useLocal { + return _escLocal + } + return _escStatic +} + +// _escDir returns a http.Filesystem for the embedded assets on a given prefix dir. +// If useLocal is true, the filesystem's contents are instead used. +func _escDir(useLocal bool, name string) http.FileSystem { + if useLocal { + return _escDirectory{fs: _escLocal, name: name} + } + return _escDirectory{fs: _escStatic, name: name} +} + +// _escFSByte returns the named file from the embedded assets. If useLocal is +// true, the filesystem's contents are instead used. +func _escFSByte(useLocal bool, name string) ([]byte, error) { + if useLocal { + f, err := _escLocal.Open(name) + if err != nil { + return nil, err + } + b, err := ioutil.ReadAll(f) + _ = f.Close() + return b, err + } + f, err := _escStatic.prepare(name) + if err != nil { + return nil, err + } + return f.data, nil +} + +// _escFSMustByte is the same as _escFSByte, but panics if name is not present. +func _escFSMustByte(useLocal bool, name string) []byte { + b, err := _escFSByte(useLocal, name) + if err != nil { + panic(err) + } + return b +} + +// _escFSString is the string version of _escFSByte. +func _escFSString(useLocal bool, name string) (string, error) { + b, err := _escFSByte(useLocal, name) + return string(b), err +} + +// _escFSMustString is the string version of _escFSMustByte. +func _escFSMustString(useLocal bool, name string) string { + return string(_escFSMustByte(useLocal, name)) +} + +var _escData = map[string]*_escFile{ + + "/config-schema.json": { + local: "config-schema.json", + size: 2771, + modtime: 1515512099, + compressed: ` +H4sIAAAAAAAC/+RWQY/TPBC951dE2T22m+/wnXot3JCKVAGHFarcZNLOEnvMeIKIUP87itNCkjpp6apc +OEUaz7z35nns+EcUx0kOLmO0gmSSRZysLJglGVFogOMlmQJ38dpChgVmymfNmrJHl+1Bq6ZkL2IXafri +yMzb6BPxLs1ZFTL/7/+0jT20dZifStwiTcmCyU5szpe12SlqtYM08/xtpdQWmlravkAmbcwyWWBBcMki +btqJ4yRjUAL5r0Cn1AmjaeF8vCDWSpqVXAnMBTUkfu3QpiSqkj3xBFQ/m7M9CmRSMVxbQ+7azKMXgeyO +Iz4ecMXHPzjgXmSEscPqc95+t+Qgf08sblj/yFB4A6FwT80IPKQ5FGiwGRWXamXXHnnVagzjm29jshSz +qpNZdwkF9FDGRCNxfBghFa4toZEhNxlYNT099wj6dJMSJ2RekNqXO5A8qcJUZdlH6uJ8Dlqw1Pk/2/tH +KisN7sb+b536e3f1ifgLmt0bvOmcv1NbKO9tyTqw8fe0ZC1k17gzqrzakqj7PV2/TCSFe831m2NRbDB3 +f/+uO+ZPdd+jBVPpsx1PSlUDuyTseDRgTRi+Vsj+P/wc8GCoLuoinjzfoxPiOmR636yAUWPbM75BwbfD +Zbem3hGB8T5/U1ze1FlA42ZbvwKDtIazP98fAIC2Um/8RIyDbIlKUGZkPvunLDoynM9N/1n1+9nUP5dR +MzuH6GcAAAD//0pj2wvTCgAA +`, + }, + + "/content-descriptor.json": { + local: "content-descriptor.json", + size: 1079, + modtime: 1537191585, + compressed: ` +H4sIAAAAAAAC/5yTsW7cMAyGdz8F4QTIkos6BB2MIEu7d2i3ooNOok5Mz5JK8RBci7x7QcvX2G2RILfZ +xP+Rn2zqVwfQe6yOqQjl1A/QfyqYPuQklhIy6BMmgY9zKDN8LugokLMTca0tLquLOFrFo0gZjHmoOW1a +9Sbzzni2QTbvbk2rXTSO/AmpgzG5YHKnyXXCWtr4P9MbJ8eCSubtAzpptcK5IAth7QfQgwH0I3qyX1q4 +lf49r0SEKadNIQfQAmNAxuTQw2LGhF8yBuU8hrp5FrvRE18Yj4ESae9qnqdP7FNr0Vf6+ZqPRoASbI+C +9Y1O/xGhJO9v1xKedljlFQ3HxyJ5x7ZEcuAiuu/1MEJjT1rN5Vp19bVYEeQEV3d2v8tMEsf74U5/rEd/ +f3XOd5xdV/4H3tcX7C3sqSlqEALnER4juQgSqc7OMNojbBF8fkz7bD36c+wmk5WbTSnLdDtWim9fdrPs +dIbaEm+G3WzZM/44EKMqff37riz3dL0uHcC37qn7HQAA//9DKIMKNwQAAA== +`, + }, + + "/defs-descriptor.json": { + local: "defs-descriptor.json", + size: 844, + modtime: 1537191664, + compressed: ` +H4sIAAAAAAAC/5SST2/TTBDG7/kU826jt0DiOHBAqlWKKnrnUE6t0mi6O7aneP9od6IqVPnuaG03SYtA +cLC1+2jmefwbz9MEQBlKOnIQ9k5VoK6oZsf5liBgFNabDiOIh6+B3BfvBNlRhKuxzUe4DqS5Zo29x3ww +3buoCnIOgLJkGL9tA+0lAMUmp7YiIVVl6QM5/ZyRFj42ZdItWSzZYkOl2aeWB7f5s5cM3ipJZNcc9IAi +FHu8u9vL4gaLH8vibHU4/ncy/b+4Wy9mq6fl/P2Hj7vy78qmqo/YDUnKcENJjuleDVdaAh23QXwTMbSs 
+Qbekv6eNhaEXfA25yN8/kJY5sOuvIwCcnmPX+MjS2ovqPI/KkLk4/ccJjFyzN5+r29liXaz2ytt3VT5f +FjfL4uzTuljNXhFuYpf+wIfQ8QCRC6GO3sJjy7oFaTmNVGBxC/cExj+6zqMh8+v3Y4y4PcgsZI9zf08K +oGofLea/oDaR1ajvXmCgc17w5XoCqGmkOvcZqtPiIXl3Uh4tcmkxXPdpw3uczCQ/u8nPAAAA///5nDLG +TAMAAA== +`, + }, + + "/defs.json": { + local: "defs.json", + size: 1670, + modtime: 1515512099, + compressed: ` +H4sIAAAAAAAC/7STza6bMBCF9zzFyO2S9oJtbGDb7hMpy6oLSiaJq2AjY6RWEe9e8RNChFuJKneRgGc8 +3zmeMbcAgByxKa2qnTKa5EC+4klp1a8aaBs8grtY054vpnXgLgi7GvUXo12hNFo41FiqkyqLoTwceTOA +5NBLABClXTqvAIj7XWOvprTDM9qhckhUSquqrUgOn2KaPsLFrykcUzkEu3Amx2IrmlEpfPA+vsIzuhVP +Yy55ygT3aczJlZDgW4UyShmTNGIiTbiUIooij6Jn15N0+x/T8enQJFlxN8/GBxZJwtbozXPxoTnNeCYk +zdb8zePw8eOUcyE5jySTUZYk1Nf8WOxNz7VLQaNxdyI5fJsCMKeG9EeLfZZ8eFt8cG9Ty+eNXeivvp9G +t9frYvf09t3Ti1c6FPy1DhtnlT5vd3jXGOtf66kq6sOAHf99V8n8+Imle9ykunAOrd5bU6N1CptFEQD5 +fIvD7in0ryMEy+fK1G6UfmdTE+tvpoL+1wV/AgAA//96IpqyhgYAAA== +`, + }, + + "/image-index-schema.json": { + local: "image-index-schema.json", + size: 2993, + modtime: 1515512099, + compressed: ` +H4sIAAAAAAAC/6yWz0/jOhDH7/0rRgGJC5CnJ/QOFeLy9sJpD4v2suJg7EkybGNnx1Ogu+r/vrJN2qRJ +C4Te2rHnO5/vxL/+zAAyg14zNULOZnPIvjZo/3dWFFlkuK1ViXBrDb7AtwY1FaRVnHoeck+9rrBWIa8S +aeZ5/uidvUjRS8dlblgVcvHPVZ5iJymPTJvi53nuGrS6LeljWpqdUyifUyifEmXVYEh1D4+oJcUadg2y +EPpsDsESQJbyvyP7ZCuFh27vKvJQEC4M+GQPPUiFECtDrAxJDJ6SGigPygJZwRI5IkTlCZ7yPuZGqnU5 +qFGTpXpZZ3P4dxtTL20shtZpJKuVpQK9+K79Vlkxq1WHXbDuzvuwnbbYl9f2ui30+Fd7HWH8tSTGUOvH +Jhrg0ZC6C2nn3bCn3zsRQyV6yTah+474yMIYyPcHhgskrIU4O3gAV8TFwVggo9VoYGApipwyFiHbYOEv +zKYnl2F3nOQGC7IUKvh8S9JRWA9Nv4czTASy8LAS9JNYRwDJyn9X++Fe+/8ePM2rRlzJqqlIg65Q//TL +GpJCi5sYz4ON8LdRIsgWzq7VonRMUtU38+uwFg2am7Ppfd9dN7u+lrzwb7pSsKCEHqZDwa6G54p0BRLO +leQFarWCBwTjnu3CKYNmOnWk2svcLJQUjush98c280Znh3PvNj60leOYYl2RoJYl404eQOZ6nnp7+PA+ +HmoPxye7zw9Cd9rhhcmW2c6E9ZjNY+I5fxyoy6fBLXkMuI3scSALVOE7HLuFW90DmP3Lslt2cG2+2yTA ++k3bT4pJWRm3/EYPZ/v+9Y8MZa2T+KDznz01tgdX3lWdfNZ1RWZjXtpf696zZ9zRpNfZmI3PGAigEXN4 +VmZjL8HOE24GcD9bz/4GAAD//yCnv52xCwAA +`, + }, + + "/image-layout-schema.json": { + local: "image-layout-schema.json", + size: 439, + modtime: 1515512099, + compressed: ` +H4sIAAAAAAAC/2yPQUvEMBCF7/0VQ/Sg4DYVPOW6pwVhD4IX8VDTaTvLNonJVFik/12SaRXRU5g38+W9 +91kBqA6TjRSYvFMG1DGg23vHLTmMcJjaAeGxvfiZ4cmOOLXqLlPXSQYDamQORutT8m4nau3joLvY9rxr +HrRoV8JRtyHJaO0DOruZpYLJtaZsrM/FWEi+BMysfzuhXbUQfcDIhEkZyG2yQyYl8TPGJLVk97fth1yA +74FHhOP+8LvyDbmy8JZ2EgZ6OuNtsS8fbrESR3LDj45unpSBl3UGUPd1UzdqnV/Lu1QAS2kS8X2miN03 +8l+PKnNL9RUAAP//k31n5bcBAAA= +`, + }, + + "/image-manifest-schema.json": { + local: "image-manifest-schema.json", + size: 921, + modtime: 1515512099, + compressed: ` +H4sIAAAAAAAC/5ySMW8iMRCF+/0VI0MJ+O501bZXUZxSJEoTpXB2x7uDWNsZmygo4r9HtnHAkCKifTvv +zTdv/dEAiB59x+QCWSNaEHcOzT9rgiKDDOtJDQj/lSGNPsC9w440dSpNL6J97rsRJxWtYwiulXLjrVlm +dWV5kD0rHZa//sqszbKP+mLxrZTWoenKVp9seVpSJJDTkSB7w95hdNuXDXZHzbF1yIHQixbiYQAiRzwi ++3xclq9vfhjJgybc9uDzheghjAhpOZTlkPPgLQeC8qAMkAk4ICeKFH7bZbKG/Uort16tmcjQtJtEC39O +mnovWpIO+YvorNE0nDcwZ9QxNqKhCcvSiOVV/H+ism/VHtmf2wuVYlb7imkdcIqjv099HJVi/ul2gENF +oYyxIb28CuXGus/TFpet9Kj9JdRM9qjJULJU9qawJlLB+Lojxoj19N07rP9JXXED8Nwcms8AAAD//7u3 +Dj+ZAwAA +`, + }, + + "/": { + isDir: true, + local: "", + }, +} diff --git a/vendor/github.com/opencontainers/image-spec/schema/gen.go b/vendor/github.com/opencontainers/image-spec/schema/gen.go new file mode 100644 index 0000000..ae78604 --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/schema/gen.go @@ -0,0 +1,21 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// 
you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package schema + +// Generates an embedded http.FileSystem for all schema files +// using esc (https://github.com/mjibson/esc). + +// This should generally be invoked with `make schema-fs` +//go:generate esc -private -pkg=schema -include=.*\.json$ . diff --git a/vendor/github.com/opencontainers/image-spec/schema/loader.go b/vendor/github.com/opencontainers/image-spec/schema/loader.go new file mode 100644 index 0000000..c6bde00 --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/schema/loader.go @@ -0,0 +1,126 @@ +// Copyright 2018 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package schema + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "strings" + + "github.com/xeipuuv/gojsonreference" + "github.com/xeipuuv/gojsonschema" +) + +// fsLoaderFactory implements gojsonschema.JSONLoaderFactory by reading files under the specified namespaces from the root of fs. +type fsLoaderFactory struct { + namespaces []string + fs http.FileSystem +} + +// newFSLoaderFactory returns a fsLoaderFactory reading files under the specified namespaces from the root of fs. +func newFSLoaderFactory(namespaces []string, fs http.FileSystem) *fsLoaderFactory { + return &fsLoaderFactory{ + namespaces: namespaces, + fs: fs, + } +} + +func (factory *fsLoaderFactory) New(source string) gojsonschema.JSONLoader { + return &fsLoader{ + factory: factory, + source: source, + } +} + +// refContents returns the contents of ref, if available in fsLoaderFactory. +func (factory *fsLoaderFactory) refContents(ref gojsonreference.JsonReference) ([]byte, error) { + refStr := ref.String() + path := "" + for _, ns := range factory.namespaces { + if strings.HasPrefix(refStr, ns) { + path = "/" + strings.TrimPrefix(refStr, ns) + break + } + } + if path == "" { + return nil, fmt.Errorf("Schema reference %#v unexpectedly not available in fsLoaderFactory with namespaces %#v", path, factory.namespaces) + } + + f, err := factory.fs.Open(path) + if err != nil { + return nil, err + } + defer f.Close() + + return ioutil.ReadAll(f) +} + +// fsLoader implements gojsonschema.JSONLoader by reading the document named by source from a fsLoaderFactory. +type fsLoader struct { + factory *fsLoaderFactory + source string +} + +// JsonSource implements gojsonschema.JSONLoader.JsonSource. The "Json" capitalization needs to be maintained to conform to the interface.
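To make the loader wiring concrete: a package-internal sketch (it uses the unexported factory, so it would only compile inside package schema) of compiling one embedded schema; schemaNamespaces, fs, and the schema URI come from schema.go later in this diff:

```go
// compileManifestSchema is a hypothetical helper, not part of this diff.
func compileManifestSchema() (*gojsonschema.Schema, error) {
	// Resolve the well-known URI against the embedded filesystem
	// rather than the network.
	sl := newFSLoaderFactory(schemaNamespaces, fs).
		New("https://opencontainers.org/schema/image/image-manifest-schema.json")
	return gojsonschema.NewSchema(sl)
}
```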
+func (l *fsLoader) JsonSource() interface{} { // nolint: golint + return l.source +} + +func (l *fsLoader) LoadJSON() (interface{}, error) { + // Based on gojsonschema.jsonReferenceLoader.LoadJSON. + reference, err := gojsonreference.NewJsonReference(l.source) + if err != nil { + return nil, err + } + + refToURL := reference + refToURL.GetUrl().Fragment = "" + + body, err := l.factory.refContents(refToURL) + if err != nil { + return nil, err + } + + return decodeJSONUsingNumber(bytes.NewReader(body)) +} + +// decodeJSONUsingNumber returns JSON parsed from an io.Reader +func decodeJSONUsingNumber(r io.Reader) (interface{}, error) { + // Copied from gojsonschema. + var document interface{} + + decoder := json.NewDecoder(r) + decoder.UseNumber() + + err := decoder.Decode(&document) + if err != nil { + return nil, err + } + + return document, nil +} + +// JsonReference implements gojsonschema.JSONLoader.JsonReference. The "Json" capitalization needs to be maintained to conform to the interface. +func (l *fsLoader) JsonReference() (gojsonreference.JsonReference, error) { // nolint: golint + return gojsonreference.NewJsonReference(l.JsonSource().(string)) +} + +func (l *fsLoader) LoaderFactory() gojsonschema.JSONLoaderFactory { + return l.factory +} diff --git a/vendor/github.com/opencontainers/image-spec/schema/schema.go b/vendor/github.com/opencontainers/image-spec/schema/schema.go new file mode 100644 index 0000000..239eefa --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/schema/schema.go @@ -0,0 +1,76 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package schema + +import ( + "net/http" + + "github.com/opencontainers/image-spec/specs-go/v1" +) + +// Media types for the OCI image formats +const ( + ValidatorMediaTypeDescriptor Validator = v1.MediaTypeDescriptor + ValidatorMediaTypeLayoutHeader Validator = v1.MediaTypeLayoutHeader + ValidatorMediaTypeManifest Validator = v1.MediaTypeImageManifest + ValidatorMediaTypeImageIndex Validator = v1.MediaTypeImageIndex + ValidatorMediaTypeImageConfig Validator = v1.MediaTypeImageConfig + ValidatorMediaTypeImageLayer unimplemented = v1.MediaTypeImageLayer +) + +var ( + // fs stores the embedded http.FileSystem + // having the OCI JSON schema files in root "/". + fs = _escFS(false) + + // schemaNamespaces is a set of URI prefixes which are treated as containing the schema files of fs. + // This is necessary because *.json schema files in this directory use "id" and "$ref" attributes which evaluate to such URIs, e.g. + // ./image-manifest-schema.json URI contains + // "id": "https://opencontainers.org/schema/image/manifest", + // and + // "$ref": "content-descriptor.json" + // which evaluates as a link to https://opencontainers.org/schema/image/content-descriptor.json . 
+ // + // To support such links without accessing the network (and trying to load content which is not hosted at these URIs), + // fsLoaderFactory accepts any URI starting with one of the schemaNamespaces below, + // and uses _escFS to load them from the root of its in-memory filesystem tree. + // + // (Note that this must contain subdirectories before its parent directories for fsLoaderFactory.refContents to work.) + schemaNamespaces = []string{ + "https://opencontainers.org/schema/image/descriptor/", + "https://opencontainers.org/schema/image/index/", + "https://opencontainers.org/schema/image/manifest/", + "https://opencontainers.org/schema/image/", + "https://opencontainers.org/schema/descriptor/", + "https://opencontainers.org/schema/", + } + + // specs maps OCI schema media types to schema URIs. + // These URIs are expected to be used only by fsLoaderFactory (which trims schemaNamespaces defined above) + // and should never cause a network access. + specs = map[Validator]string{ + ValidatorMediaTypeDescriptor: "https://opencontainers.org/schema/content-descriptor.json", + ValidatorMediaTypeLayoutHeader: "https://opencontainers.org/schema/image/image-layout-schema.json", + ValidatorMediaTypeManifest: "https://opencontainers.org/schema/image/image-manifest-schema.json", + ValidatorMediaTypeImageIndex: "https://opencontainers.org/schema/image/image-index-schema.json", + ValidatorMediaTypeImageConfig: "https://opencontainers.org/schema/image/config-schema.json", + } +) + +// FileSystem returns an in-memory filesystem including the schema files. +// The schema files are located at the root directory. +func FileSystem() http.FileSystem { + return fs +} diff --git a/vendor/github.com/opencontainers/image-spec/schema/validator.go b/vendor/github.com/opencontainers/image-spec/schema/validator.go new file mode 100644 index 0000000..029217c --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/schema/validator.go @@ -0,0 +1,224 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package schema + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "regexp" + + digest "github.com/opencontainers/go-digest" + "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/xeipuuv/gojsonschema" +) + +// Validator wraps a media type string identifier +// and implements validation against a JSON schema. +type Validator string + +type validateFunc func(r io.Reader) error + +var mapValidate = map[Validator]validateFunc{ + ValidatorMediaTypeImageConfig: validateConfig, + ValidatorMediaTypeDescriptor: validateDescriptor, + ValidatorMediaTypeImageIndex: validateIndex, + ValidatorMediaTypeManifest: validateManifest, +} + +// ValidationError contains all the errors that happened during validation. 
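A small sketch using the exported FileSystem accessor above; the path matches the _escData keys embedded in fs.go:

```go
package main

import (
	"fmt"
	"io/ioutil"

	"github.com/opencontainers/image-spec/schema"
)

func main() {
	// Open one of the JSON schemas served from the root of the embedded tree.
	f, err := schema.FileSystem().Open("/image-manifest-schema.json")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	b, err := ioutil.ReadAll(f)
	if err != nil {
		panic(err)
	}
	fmt.Printf("embedded schema is %d bytes\n", len(b))
}
```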
+type ValidationError struct { + Errs []error +} + +func (e ValidationError) Error() string { + return fmt.Sprintf("%v", e.Errs) +} + +// Validate validates the given reader against the schema of the wrapped media type. +func (v Validator) Validate(src io.Reader) error { + buf, err := ioutil.ReadAll(src) + if err != nil { + return errors.Wrap(err, "unable to read the document file") + } + + if f, ok := mapValidate[v]; ok { + if f == nil { + return fmt.Errorf("internal error: mapValidate[%q] is nil", v) + } + err = f(bytes.NewReader(buf)) + if err != nil { + return err + } + } + + sl := newFSLoaderFactory(schemaNamespaces, fs).New(specs[v]) + ml := gojsonschema.NewStringLoader(string(buf)) + + result, err := gojsonschema.Validate(sl, ml) + if err != nil { + return errors.Wrapf( + WrapSyntaxError(bytes.NewReader(buf), err), + "schema %s: unable to validate", v) + } + + if result.Valid() { + return nil + } + + errs := make([]error, 0, len(result.Errors())) + for _, desc := range result.Errors() { + errs = append(errs, fmt.Errorf("%s", desc)) + } + + return ValidationError{ + Errs: errs, + } +} + +type unimplemented string + +func (v unimplemented) Validate(src io.Reader) error { + return fmt.Errorf("%s: unimplemented", v) +} + +func validateManifest(r io.Reader) error { + header := v1.Manifest{} + + buf, err := ioutil.ReadAll(r) + if err != nil { + return errors.Wrapf(err, "error reading the io stream") + } + + err = json.Unmarshal(buf, &header) + if err != nil { + return errors.Wrap(err, "manifest format mismatch") + } + + if header.Config.MediaType != string(v1.MediaTypeImageConfig) { + fmt.Printf("warning: config %s has an unknown media type: %s\n", header.Config.Digest, header.Config.MediaType) + } + + for _, layer := range header.Layers { + if layer.MediaType != string(v1.MediaTypeImageLayer) && + layer.MediaType != string(v1.MediaTypeImageLayerGzip) && + layer.MediaType != string(v1.MediaTypeImageLayerNonDistributable) && + layer.MediaType != string(v1.MediaTypeImageLayerNonDistributableGzip) { + fmt.Printf("warning: layer %s has an unknown media type: %s\n", layer.Digest, layer.MediaType) + } + } + return nil +} + +func validateDescriptor(r io.Reader) error { + header := v1.Descriptor{} + + buf, err := ioutil.ReadAll(r) + if err != nil { + return errors.Wrapf(err, "error reading the io stream") + } + + err = json.Unmarshal(buf, &header) + if err != nil { + return errors.Wrap(err, "descriptor format mismatch") + } + + err = header.Digest.Validate() + if err == digest.ErrDigestUnsupported { + // we ignore unsupported algorithms + fmt.Printf("warning: unsupported digest: %q: %v\n", header.Digest, err) + return nil + } + return err +} + +func validateIndex(r io.Reader) error { + header := v1.Index{} + + buf, err := ioutil.ReadAll(r) + if err != nil { + return errors.Wrapf(err, "error reading the io stream") + } + + err = json.Unmarshal(buf, &header) + if err != nil { + return errors.Wrap(err, "index format mismatch") + } + + for _, manifest := range header.Manifests { + if manifest.MediaType != string(v1.MediaTypeImageManifest) { + fmt.Printf("warning: manifest %s has an unknown media type: %s\n", manifest.Digest, manifest.MediaType) + } + if manifest.Platform != nil { + checkPlatform(manifest.Platform.OS, manifest.Platform.Architecture) + } + + } + + return nil +} + +func validateConfig(r io.Reader) error { + header := v1.Image{} + + buf, err := ioutil.ReadAll(r) + if err != nil { + return errors.Wrapf(err, "error reading the io stream") + } + + err = json.Unmarshal(buf, &header) + if err 
!= nil { + return errors.Wrap(err, "config format mismatch") + } + + checkPlatform(header.OS, header.Architecture) + + envRegexp := regexp.MustCompile(`^[^=]+=.*$`) + for _, e := range header.Config.Env { + if !envRegexp.MatchString(e) { + return errors.Errorf("unexpected env: %q", e) + } + } + + return nil +} + +func checkPlatform(OS string, Architecture string) { + validCombins := map[string][]string{ + "android": {"arm"}, + "darwin": {"386", "amd64", "arm", "arm64"}, + "dragonfly": {"amd64"}, + "freebsd": {"386", "amd64", "arm"}, + "linux": {"386", "amd64", "arm", "arm64", "ppc64", "ppc64le", "mips64", "mips64le", "s390x"}, + "netbsd": {"386", "amd64", "arm"}, + "openbsd": {"386", "amd64", "arm"}, + "plan9": {"386", "amd64"}, + "solaris": {"amd64"}, + "windows": {"386", "amd64"}} + for os, archs := range validCombins { + if os == OS { + for _, arch := range archs { + if arch == Architecture { + return + } + } + fmt.Printf("warning: combination of %q and %q is invalid.\n", OS, Architecture) + } + } + fmt.Printf("warning: operating system %q of the bundle is not supported yet.\n", OS) +} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go new file mode 100644 index 0000000..35d8108 --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go @@ -0,0 +1,56 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +const ( + // AnnotationCreated is the annotation key for the date and time on which the image was built (date-time string as defined by RFC 3339). + AnnotationCreated = "org.opencontainers.image.created" + + // AnnotationAuthors is the annotation key for the contact details of the people or organization responsible for the image (freeform string). + AnnotationAuthors = "org.opencontainers.image.authors" + + // AnnotationURL is the annotation key for the URL to find more information on the image. + AnnotationURL = "org.opencontainers.image.url" + + // AnnotationDocumentation is the annotation key for the URL to get documentation on the image. + AnnotationDocumentation = "org.opencontainers.image.documentation" + + // AnnotationSource is the annotation key for the URL to get source code for building the image. + AnnotationSource = "org.opencontainers.image.source" + + // AnnotationVersion is the annotation key for the version of the packaged software. + // The version MAY match a label or tag in the source code repository. + // The version MAY be Semantic versioning-compatible. + AnnotationVersion = "org.opencontainers.image.version" + + // AnnotationRevision is the annotation key for the source control revision identifier for the packaged software. + AnnotationRevision = "org.opencontainers.image.revision" + + // AnnotationVendor is the annotation key for the name of the distributing entity, organization or individual. 
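Stepping back from validator.go as a whole: a hedged end-to-end sketch of the exported entry point, Validator.Validate, which runs both the JSON-schema check and the media-type-specific validate function. The manifest literal is fabricated, with placeholder digests sized to pass the sha256 length check:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/opencontainers/image-spec/schema"
)

func main() {
	fakeDigest := "sha256:" + strings.Repeat("a", 64) // well-formed placeholder
	manifest := fmt.Sprintf(`{
  "schemaVersion": 2,
  "config": {
    "mediaType": "application/vnd.oci.image.config.v1+json",
    "digest": %q,
    "size": 452
  },
  "layers": [{
    "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
    "digest": %q,
    "size": 32654
  }]
}`, fakeDigest, fakeDigest)

	if err := schema.ValidatorMediaTypeManifest.Validate(strings.NewReader(manifest)); err != nil {
		fmt.Println("invalid:", err)
		return
	}
	fmt.Println("manifest validates")
}
```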
+ AnnotationVendor = "org.opencontainers.image.vendor" + + // AnnotationLicenses is the annotation key for the license(s) under which contained software is distributed as an SPDX License Expression. + AnnotationLicenses = "org.opencontainers.image.licenses" + + // AnnotationRefName is the annotation key for the name of the reference for a target. + // SHOULD only be considered valid when on descriptors on `index.json` within image layout. + AnnotationRefName = "org.opencontainers.image.ref.name" + + // AnnotationTitle is the annotation key for the human-readable title of the image. + AnnotationTitle = "org.opencontainers.image.title" + + // AnnotationDescription is the annotation key for the human-readable description of the software packaged in the image. + AnnotationDescription = "org.opencontainers.image.description" +) diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go new file mode 100644 index 0000000..fe799bd --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go @@ -0,0 +1,103 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import ( + "time" + + digest "github.com/opencontainers/go-digest" +) + +// ImageConfig defines the execution parameters which should be used as a base when running a container using an image. +type ImageConfig struct { + // User defines the username or UID which the process in the container should run as. + User string `json:"User,omitempty"` + + // ExposedPorts a set of ports to expose from a container running this image. + ExposedPorts map[string]struct{} `json:"ExposedPorts,omitempty"` + + // Env is a list of environment variables to be used in a container. + Env []string `json:"Env,omitempty"` + + // Entrypoint defines a list of arguments to use as the command to execute when the container starts. + Entrypoint []string `json:"Entrypoint,omitempty"` + + // Cmd defines the default arguments to the entrypoint of the container. + Cmd []string `json:"Cmd,omitempty"` + + // Volumes is a set of directories describing where the process is likely write data specific to a container instance. + Volumes map[string]struct{} `json:"Volumes,omitempty"` + + // WorkingDir sets the current working directory of the entrypoint process in the container. + WorkingDir string `json:"WorkingDir,omitempty"` + + // Labels contains arbitrary metadata for the container. + Labels map[string]string `json:"Labels,omitempty"` + + // StopSignal contains the system call signal that will be sent to the container to exit. + StopSignal string `json:"StopSignal,omitempty"` +} + +// RootFS describes a layer content addresses +type RootFS struct { + // Type is the type of the rootfs. + Type string `json:"type"` + + // DiffIDs is an array of layer content hashes (DiffIDs), in order from bottom-most to top-most. + DiffIDs []digest.Digest `json:"diff_ids"` +} + +// History describes the history of a layer. 
+type History struct { + // Created is the combined date and time at which the layer was created, formatted as defined by RFC 3339, section 5.6. + Created *time.Time `json:"created,omitempty"` + + // CreatedBy is the command which created the layer. + CreatedBy string `json:"created_by,omitempty"` + + // Author is the author of the build point. + Author string `json:"author,omitempty"` + + // Comment is a custom message set when creating the layer. + Comment string `json:"comment,omitempty"` + + // EmptyLayer is used to mark if the history item created a filesystem diff. + EmptyLayer bool `json:"empty_layer,omitempty"` +} + +// Image is the JSON structure which describes some basic information about the image. +// This provides the `application/vnd.oci.image.config.v1+json` mediatype when marshalled to JSON. +type Image struct { + // Created is the combined date and time at which the image was created, formatted as defined by RFC 3339, section 5.6. + Created *time.Time `json:"created,omitempty"` + + // Author defines the name and/or email address of the person or entity which created and is responsible for maintaining the image. + Author string `json:"author,omitempty"` + + // Architecture is the CPU architecture which the binaries in this image are built to run on. + Architecture string `json:"architecture"` + + // OS is the name of the operating system which the image is built to run on. + OS string `json:"os"` + + // Config defines the execution parameters which should be used as a base when running a container using the image. + Config ImageConfig `json:"config,omitempty"` + + // RootFS references the layer content addresses used by the image. + RootFS RootFS `json:"rootfs"` + + // History describes the history of each layer. + History []History `json:"history,omitempty"` +} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go new file mode 100644 index 0000000..6e442a0 --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go @@ -0,0 +1,64 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import digest "github.com/opencontainers/go-digest" + +// Descriptor describes the disposition of targeted content. +// This structure provides `application/vnd.oci.descriptor.v1+json` mediatype +// when marshalled to JSON. +type Descriptor struct { + // MediaType is the media type of the object this schema refers to. + MediaType string `json:"mediaType,omitempty"` + + // Digest is the digest of the targeted content. + Digest digest.Digest `json:"digest"` + + // Size specifies the size in bytes of the blob. + Size int64 `json:"size"` + + // URLs specifies a list of URLs from which this object MAY be downloaded + URLs []string `json:"urls,omitempty"` + + // Annotations contains arbitrary metadata relating to the targeted content. 
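As a quick illustration of the config types above (Image, ImageConfig, RootFS, History), a sketch that assembles and marshals a minimal config; every field value is invented:

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"

	digest "github.com/opencontainers/go-digest"
	v1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	created := time.Date(2018, 6, 1, 0, 0, 0, 0, time.UTC)
	img := v1.Image{
		Created:      &created,
		Architecture: "amd64",
		OS:           "linux",
		Config: v1.ImageConfig{
			Env: []string{"PATH=/usr/sbin:/usr/bin:/sbin:/bin"},
			Cmd: []string{"/bin/sh"},
		},
		RootFS: v1.RootFS{
			Type:    "layers",
			DiffIDs: []digest.Digest{digest.FromString("uncompressed layer bytes")},
		},
		History: []v1.History{{Created: &created, CreatedBy: "/bin/sh -c example"}},
	}

	out, err := json.MarshalIndent(img, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```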
+ Annotations map[string]string `json:"annotations,omitempty"` + + // Platform describes the platform which the image in the manifest runs on. + // + // This should only be used when referring to a manifest. + Platform *Platform `json:"platform,omitempty"` +} + +// Platform describes the platform which the image in the manifest runs on. +type Platform struct { + // Architecture field specifies the CPU architecture, for example + // `amd64` or `ppc64`. + Architecture string `json:"architecture"` + + // OS specifies the operating system, for example `linux` or `windows`. + OS string `json:"os"` + + // OSVersion is an optional field specifying the operating system + // version, for example on Windows `10.0.14393.1066`. + OSVersion string `json:"os.version,omitempty"` + + // OSFeatures is an optional field specifying an array of strings, + // each listing a required OS feature (for example on Windows `win32k`). + OSFeatures []string `json:"os.features,omitempty"` + + // Variant is an optional field specifying a variant of the CPU, for + // example `v7` to specify ARMv7 when architecture is `arm`. + Variant string `json:"variant,omitempty"` +} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go new file mode 100644 index 0000000..4e6c4b2 --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go @@ -0,0 +1,29 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import "github.com/opencontainers/image-spec/specs-go" + +// Index references manifests for various platforms. +// This structure provides `application/vnd.oci.image.index.v1+json` mediatype when marshalled to JSON. +type Index struct { + specs.Versioned + + // Manifests references platform specific manifests. + Manifests []Descriptor `json:"manifests"` + + // Annotations contains arbitrary metadata for the image index. + Annotations map[string]string `json:"annotations,omitempty"` +} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go new file mode 100644 index 0000000..fc79e9e --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go @@ -0,0 +1,28 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
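A companion sketch tying Descriptor, Platform, and Index together; specs.Versioned (vendored at the end of this diff) carries the schemaVersion field, and the digest and size here are placeholders:

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"

	digest "github.com/opencontainers/go-digest"
	"github.com/opencontainers/image-spec/specs-go"
	v1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	idx := v1.Index{
		Versioned: specs.Versioned{SchemaVersion: 2},
		Manifests: []v1.Descriptor{{
			MediaType: v1.MediaTypeImageManifest,
			Digest:    digest.Digest("sha256:" + strings.Repeat("a", 64)), // placeholder
			Size:      7143,
			Platform:  &v1.Platform{Architecture: "amd64", OS: "linux"},
		}},
		Annotations: map[string]string{v1.AnnotationRefName: "v1.0.0"},
	}

	out, err := json.MarshalIndent(idx, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```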
+ +package v1 + +const ( + // ImageLayoutFile is the file name of oci image layout file + ImageLayoutFile = "oci-layout" + // ImageLayoutVersion is the version of ImageLayout + ImageLayoutVersion = "1.0.0" +) + +// ImageLayout is the structure in the "oci-layout" file, found in the root +// of an OCI Image-layout directory. +type ImageLayout struct { + Version string `json:"imageLayoutVersion"` +} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go new file mode 100644 index 0000000..7ff32c4 --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go @@ -0,0 +1,32 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import "github.com/opencontainers/image-spec/specs-go" + +// Manifest provides `application/vnd.oci.image.manifest.v1+json` mediatype structure when marshalled to JSON. +type Manifest struct { + specs.Versioned + + // Config references a configuration object for a container, by digest. + // The referenced configuration object is a JSON blob that the runtime uses to set up the container. + Config Descriptor `json:"config"` + + // Layers is an indexed list of layers referenced by the manifest. + Layers []Descriptor `json:"layers"` + + // Annotations contains arbitrary metadata for the image manifest. + Annotations map[string]string `json:"annotations,omitempty"` +} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go new file mode 100644 index 0000000..bad7bb9 --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go @@ -0,0 +1,48 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +const ( + // MediaTypeDescriptor specifies the media type for a content descriptor. + MediaTypeDescriptor = "application/vnd.oci.descriptor.v1+json" + + // MediaTypeLayoutHeader specifies the media type for the oci-layout. + MediaTypeLayoutHeader = "application/vnd.oci.layout.header.v1+json" + + // MediaTypeImageManifest specifies the media type for an image manifest. + MediaTypeImageManifest = "application/vnd.oci.image.manifest.v1+json" + + // MediaTypeImageIndex specifies the media type for an image index. 
+	MediaTypeImageIndex = "application/vnd.oci.image.index.v1+json"
+
+	// MediaTypeImageLayer is the media type used for layers referenced by the manifest.
+	MediaTypeImageLayer = "application/vnd.oci.image.layer.v1.tar"
+
+	// MediaTypeImageLayerGzip is the media type used for gzipped layers
+	// referenced by the manifest.
+	MediaTypeImageLayerGzip = "application/vnd.oci.image.layer.v1.tar+gzip"
+
+	// MediaTypeImageLayerNonDistributable is the media type for layers referenced by
+	// the manifest but with distribution restrictions.
+	MediaTypeImageLayerNonDistributable = "application/vnd.oci.image.layer.nondistributable.v1.tar"
+
+	// MediaTypeImageLayerNonDistributableGzip is the media type for
+	// gzipped layers referenced by the manifest but with distribution
+	// restrictions.
+	MediaTypeImageLayerNonDistributableGzip = "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip"
+
+	// MediaTypeImageConfig specifies the media type for the image configuration.
+	MediaTypeImageConfig = "application/vnd.oci.image.config.v1+json"
+)
diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/version.go b/vendor/github.com/opencontainers/image-spec/specs-go/version.go
new file mode 100644
index 0000000..e3f88c1
--- /dev/null
+++ b/vendor/github.com/opencontainers/image-spec/specs-go/version.go
@@ -0,0 +1,32 @@
+// Copyright 2016 The Linux Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package specs
+
+import "fmt"
+
+const (
+	// VersionMajor is for API-incompatible changes
+	VersionMajor = 1
+	// VersionMinor is for functionality added in a backwards-compatible manner
+	VersionMinor = 0
+	// VersionPatch is for backwards-compatible bug fixes
+	VersionPatch = 0
+
+	// VersionDev indicates the development branch; it is empty for releases.
+	VersionDev = "-dev"
+)
+
+// Version is the specification version that the package types support.
+var Version = fmt.Sprintf("%d.%d.%d%s", VersionMajor, VersionMinor, VersionPatch, VersionDev)
diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/versioned.go b/vendor/github.com/opencontainers/image-spec/specs-go/versioned.go
new file mode 100644
index 0000000..58a1510
--- /dev/null
+++ b/vendor/github.com/opencontainers/image-spec/specs-go/versioned.go
@@ -0,0 +1,23 @@
+// Copyright 2016 The Linux Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package specs
+
+// Versioned provides a struct with the manifest schemaVersion and mediaType.
+// Incoming content with unknown schema version can be decoded against this +// struct to check the version. +type Versioned struct { + // SchemaVersion is the image manifest schema that this image follows + SchemaVersion int `json:"schemaVersion"` +} diff --git a/vendor/github.com/opencontainers/image-tools/LICENSE b/vendor/github.com/opencontainers/image-tools/LICENSE new file mode 100644 index 0000000..8dada3e --- /dev/null +++ b/vendor/github.com/opencontainers/image-tools/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "{}"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright {yyyy} {name of copyright owner}
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/opencontainers/image-tools/README.md b/vendor/github.com/opencontainers/image-tools/README.md
new file mode 100644
index 0000000..22f771e
--- /dev/null
+++ b/vendor/github.com/opencontainers/image-tools/README.md
@@ -0,0 +1,178 @@
+# oci-image-tool [![Build Status](https://travis-ci.org/opencontainers/image-tools.svg?branch=master)](https://travis-ci.org/opencontainers/image-tools) [![Go Report Card](https://goreportcard.com/badge/github.com/opencontainers/image-tools)](https://goreportcard.com/report/github.com/opencontainers/image-tools)
+
+`oci-image-tool` is a collection of tools for working with the [OCI image format specification](https://github.com/opencontainers/image-spec).
+To build from source, image-tools requires Go 1.7.x or above.
+
+## Install
+
+It is recommended to use `go get` to download the command-line tool:
+
+```
+$ go get -d github.com/opencontainers/image-tools/cmd/oci-image-tool
+$ cd $GOPATH/src/github.com/opencontainers/image-tools/
+$ make all
+$ sudo make install
+```
+
+## Uninstall
+
+```
+$ sudo make uninstall
+```
+
+## Example
+
+### Obtaining an image
+
+The following examples assume you have an [image-layout](https://github.com/opencontainers/image-spec/blob/v1.0.0-rc2/image-layout.md) tar archive at `busybox-oci`.
+One way to acquire that image is with [skopeo](https://github.com/projectatomic/skopeo#installing):
+
+```
+$ skopeo copy docker://busybox oci:busybox-oci
+```
+
+### oci-image-tool-create
+
+More information about `oci-image-tool-create` can be found in its [man page](./man/oci-image-tool-create.1.md).
+
+```
+$ mkdir busybox-bundle
+$ oci-image-tool create --ref latest busybox-oci busybox-bundle
+$ cd busybox-bundle && sudo runc run busybox
+```
+
+### oci-image-tool-validate
+
+More information about `oci-image-tool-validate` can be found in its [man page](./man/oci-image-tool-validate.1.md).
+
+```
+$ oci-image-tool validate --type imageLayout --ref latest busybox-oci
+busybox-oci: OK
+```
+
+### oci-image-tool-unpack
+
+More information about `oci-image-tool-unpack` can be found in its [man page](./man/oci-image-tool-unpack.1.md).
+
+```
+$ mkdir busybox-bundle
+$ oci-image-tool unpack --ref latest busybox-oci busybox-bundle
+$ tree busybox-bundle
+busybox-bundle
+├── bin
+│   ├── [
+│   ├── [[
+│   ├── acpid
+│   ├── addgroup
+│   ├── add-shell
+[...]
+``` + +# Contributing + +Development happens on GitHub. Issues are used for bugs and actionable items and longer discussions can happen on the [mailing list](#mailing-list). + +The code is licensed under the Apache 2.0 license found in the `LICENSE` file of this repository. + +## Code of Conduct + +Participation in the OpenContainers community is governed by [OpenContainer's Code of Conduct](https://github.com/opencontainers/tob/blob/d2f9d68c1332870e40693fe077d311e0742bc73d/code-of-conduct.md). + +## Discuss your design + +The project welcomes submissions, but please let everyone know what you are working on. + +Before undertaking a nontrivial change to this repository, send mail to the [mailing list](#mailing-list) to discuss what you plan to do. +This gives everyone a chance to validate the design, helps prevent duplication of effort, and ensures that the idea fits. +It also guarantees that the design is sound before code is written; a GitHub pull-request is not the place for high-level discussions. + +Typos and grammatical errors can go straight to a pull-request. +When in doubt, start on the [mailing-list](#mailing-list). + +## Weekly Call + +The contributors and maintainers of all OCI projects have a weekly meeting Wednesdays at 2:00 PM (USA Pacific.) +Everyone is welcome to participate via [UberConference web][UberConference] or audio-only: 888-587-9088 or 860-706-8529 (no PIN needed.) +An initial agenda will be posted to the [mailing list](#mailing-list) earlier in the week, and everyone is welcome to propose additional topics or suggest other agenda alterations there. +Minutes are posted to the [mailing list](#mailing-list) and minutes from past calls are archived to the [wiki](https://github.com/opencontainers/runtime-spec/wiki) for those who are unable to join the call. + +## Mailing List + +You can subscribe and join the mailing list on [Google Groups](https://groups.google.com/a/opencontainers.org/forum/#!forum/dev). + +## IRC + +OCI discussion happens on #opencontainers on Freenode ([logs][irc-logs]). + +## Git commit + +### Sign your work + +The sign-off is a simple line at the end of the explanation for the patch, which certifies that you wrote it or otherwise have the right to pass it on as an open-source patch. +The rules are pretty simple: if you can certify the below (from [developercertificate.org](http://developercertificate.org/)): + +``` +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +660 York Street, Suite 102, +San Francisco, CA 94110 USA + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. 
+ +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. +``` + +then you just add a line to every git commit message: + + Signed-off-by: Joe Smith + +using your real name (sorry, no pseudonyms or anonymous contributions.) + +You can add the sign off when creating the git commit via `git commit -s`. + +### Commit Style + +Simple house-keeping for clean git history. +Read more on [How to Write a Git Commit Message](http://chris.beams.io/posts/git-commit/) or the Discussion section of [`git-commit(1)`](http://git-scm.com/docs/git-commit). + +1. Separate the subject from body with a blank line +2. Limit the subject line to 50 characters +3. Capitalize the subject line +4. Do not end the subject line with a period +5. Use the imperative mood in the subject line +6. Wrap the body at 72 characters +7. Use the body to explain what and why vs. how + * If there was important/useful/essential conversation or information, copy or include a reference +8. When possible, one keyword to scope the change in the subject (i.e. "README: ...", "runtime: ...") + + +[UberConference]: https://www.uberconference.com/opencontainers +[irc-logs]: http://ircbot.wl.linuxfoundation.org/eavesdrop/%23opencontainers/ diff --git a/vendor/github.com/opencontainers/image-tools/image/autodetect.go b/vendor/github.com/opencontainers/image-tools/image/autodetect.go new file mode 100644 index 0000000..c7f6363 --- /dev/null +++ b/vendor/github.com/opencontainers/image-tools/image/autodetect.go @@ -0,0 +1,112 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package image + +import ( + "encoding/json" + "io" + "io/ioutil" + "net/http" + "os" + + "github.com/opencontainers/image-spec/schema" + "github.com/pkg/errors" +) + +// supported autodetection types +const ( + TypeImageLayout = "imageLayout" + TypeImage = "image" + TypeImageZip = "imageZip" + TypeManifest = "manifest" + TypeImageIndex = "imageIndex" + TypeConfig = "config" +) + +// Autodetect detects the validation type for the given path +// or an error if the validation type could not be resolved. 
+func Autodetect(path string) (string, error) { + fi, err := os.Stat(path) + if err != nil { + return "", errors.Wrapf(err, "unable to access path") // err from os.Stat includes path name + } + + if fi.IsDir() { + return TypeImageLayout, nil + } + + f, err := os.Open(path) + if err != nil { + return "", errors.Wrap(err, "unable to open file") // os.Open includes the filename + } + defer f.Close() + + buf, err := ioutil.ReadAll(io.LimitReader(f, 512)) // read some initial bytes to detect content + if err != nil { + return "", errors.Wrap(err, "unable to read") + } + + mimeType := http.DetectContentType(buf) + + switch mimeType { + case "application/x-gzip", "application/x-rar-compressed", "application/octet-stream": + return TypeImage, nil + case "application/zip": + return TypeImageZip, nil + + case "text/plain; charset=utf-8": + // might be a JSON file, will be handled below + + default: + return "", errors.New("unknown file type") + } + + if _, err := f.Seek(0, io.SeekStart); err != nil { + return "", errors.Wrap(err, "unable to seek") + } + + header := struct { + SchemaVersion int `json:"schemaVersion"` + MediaType string `json:"mediaType"` + Config interface{} `json:"config"` + }{} + + if err := json.NewDecoder(f).Decode(&header); err != nil { + if _, errSeek := f.Seek(0, io.SeekStart); errSeek != nil { + return "", errors.Wrap(err, "unable to seek") + } + + e := errors.Wrap( + schema.WrapSyntaxError(f, err), + "unable to parse JSON", + ) + + return "", e + } + + switch { + case header.MediaType == string(schema.ValidatorMediaTypeManifest): + return TypeManifest, nil + + case header.MediaType == string(schema.ValidatorMediaTypeImageIndex): + return TypeImageIndex, nil + + case header.MediaType == "" && header.SchemaVersion == 0 && header.Config != nil: + // config files don't have mediaType/schemaVersion header + return TypeConfig, nil + } + + return "", errors.New("unknown media type") +} diff --git a/vendor/github.com/opencontainers/image-tools/image/config.go b/vendor/github.com/opencontainers/image-tools/image/config.go new file mode 100644 index 0000000..d28b1bc --- /dev/null +++ b/vendor/github.com/opencontainers/image-tools/image/config.go @@ -0,0 +1,126 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
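
As a usage sketch (not part of this change), the detected type can drive which validator to call. The `busybox-oci` path is the hypothetical layout from the README above, and the import path assumes the upstream repository location:

```
package main

import (
	"log"
	"os"

	"github.com/opencontainers/image-tools/image"
)

func main() {
	logger := log.New(os.Stdout, "", 0)
	path := "busybox-oci" // hypothetical image layout or archive

	kind, err := image.Autodetect(path)
	if err != nil {
		log.Fatalf("autodetect failed: %v", err)
	}

	// Dispatch to the matching validator; a nil refs slice means
	// "validate every reference found in the index".
	switch kind {
	case image.TypeImageLayout:
		err = image.ValidateLayout(path, nil, logger)
	case image.TypeImage:
		err = image.ValidateFile(path, nil, logger)
	case image.TypeImageZip:
		err = image.ValidateZip(path, nil, logger)
	default:
		log.Fatalf("unhandled type %q", kind)
	}
	if err != nil {
		log.Fatalf("validation failed: %v", err)
	}
}
```
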
+ +package image + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/opencontainers/image-spec/schema" + "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" +) + +type config v1.Image + +func findConfig(w walker, d *v1.Descriptor) (*config, error) { + var c config + cpath := filepath.Join("blobs", string(d.Digest.Algorithm()), d.Digest.Hex()) + + switch err := w.walk(func(path string, info os.FileInfo, r io.Reader) error { + if info.IsDir() || filepath.Clean(path) != cpath { + return nil + } + buf, err := ioutil.ReadAll(r) + if err != nil { + return errors.Wrapf(err, "%s: error reading config", path) + } + + if err := schema.ValidatorMediaTypeImageConfig.Validate(bytes.NewReader(buf)); err != nil { + return errors.Wrapf(err, "%s: config validation failed", path) + } + + if err := json.Unmarshal(buf, &c); err != nil { + return err + } + + return errEOW + }); err { + case nil: + return nil, fmt.Errorf("%s: config not found", cpath) + case errEOW: + return &c, nil + default: + return nil, err + } +} + +func (c *config) runtimeSpec(rootfs string) (*specs.Spec, error) { + if c.OS != "linux" { + return nil, fmt.Errorf("%s: unsupported OS", c.OS) + } + + var s specs.Spec + s.Version = specs.Version + // we should at least apply the default spec, otherwise this is totally useless + s.Root = &specs.Root{} + s.Root.Path = rootfs + + s.Process = &specs.Process{} + s.Process.Terminal = true + s.Process.Cwd = "/" + if c.Config.WorkingDir != "" { + s.Process.Cwd = c.Config.WorkingDir + } + s.Process.Env = append(s.Process.Env, c.Config.Env...) + s.Process.Args = append(s.Process.Args, c.Config.Entrypoint...) + s.Process.Args = append(s.Process.Args, c.Config.Cmd...) + + if len(s.Process.Args) == 0 { + s.Process.Args = append(s.Process.Args, "sh") + } + + if uid, err := strconv.Atoi(c.Config.User); err == nil { + s.Process.User.UID = uint32(uid) + } else if ug := strings.Split(c.Config.User, ":"); len(ug) == 2 { + uid, err := strconv.Atoi(ug[0]) + if err != nil { + return nil, errors.New("config.User: unsupported uid format") + } + + gid, err := strconv.Atoi(ug[1]) + if err != nil { + return nil, errors.New("config.User: unsupported gid format") + } + + s.Process.User.UID = uint32(uid) + s.Process.User.GID = uint32(gid) + } else if c.Config.User != "" { + return nil, errors.New("config.User: unsupported format") + } + + s.Linux = &specs.Linux{} + + for vol := range c.Config.Volumes { + s.Mounts = append( + s.Mounts, + specs.Mount{ + Destination: vol, + Type: "bind", + Options: []string{"rbind"}, + }, + ) + } + + return &s, nil +} diff --git a/vendor/github.com/opencontainers/image-tools/image/descriptor.go b/vendor/github.com/opencontainers/image-tools/image/descriptor.go new file mode 100644 index 0000000..fb3ae97 --- /dev/null +++ b/vendor/github.com/opencontainers/image-tools/image/descriptor.go @@ -0,0 +1,123 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package image
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+
+	"github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+)
+
+const indexPath = "index.json"
+
+func listReferences(w walker) (map[string]*v1.Descriptor, error) {
+	refs := make(map[string]*v1.Descriptor)
+	var index v1.Index
+
+	if err := w.walk(func(path string, info os.FileInfo, r io.Reader) error {
+		if info.IsDir() || filepath.Clean(path) != indexPath {
+			return nil
+		}
+
+		if err := json.NewDecoder(r).Decode(&index); err != nil {
+			return err
+		}
+
+		for i := 0; i < len(index.Manifests); i++ {
+			if index.Manifests[i].Annotations[v1.AnnotationRefName] != "" {
+				refs[index.Manifests[i].Annotations[v1.AnnotationRefName]] = &index.Manifests[i]
+			}
+		}
+
+		return nil
+	}); err != nil {
+		return nil, err
+	}
+	return refs, nil
+}
+
+func findDescriptor(w walker, name string) (*v1.Descriptor, error) {
+	var d v1.Descriptor
+	var index v1.Index
+
+	switch err := w.walk(func(path string, info os.FileInfo, r io.Reader) error {
+		if info.IsDir() || filepath.Clean(path) != indexPath {
+			return nil
+		}
+
+		if err := json.NewDecoder(r).Decode(&index); err != nil {
+			return err
+		}
+
+		for i := 0; i < len(index.Manifests); i++ {
+			if index.Manifests[i].Annotations[v1.AnnotationRefName] == name {
+				d = index.Manifests[i]
+				return errEOW
+			}
+		}
+
+		return nil
+	}); err {
+	case nil:
+		return nil, fmt.Errorf("index.json: descriptor %q not found", name)
+	case errEOW:
+		return &d, nil
+	default:
+		return nil, err
+	}
+}
+
+func validateDescriptor(d *v1.Descriptor, w walker, mts []string) error {
+	var found bool
+	for _, mt := range mts {
+		if d.MediaType == mt {
+			found = true
+			break
+		}
+	}
+	if !found {
+		return fmt.Errorf("invalid descriptor MediaType %q", d.MediaType)
+	}
+
+	if err := d.Digest.Validate(); err != nil {
+		return err
+	}
+
+	// Copy the contents of the blob into the verifier, then check the
+	// byte count and digest against the descriptor.
+	verifier := d.Digest.Verifier()
+	numBytes, err := w.get(*d, verifier)
+	if err != nil {
+		return errors.Wrap(err, "error generating hash")
+	}
+
+	if numBytes != d.Size {
+		return errors.New("size mismatch")
+	}
+
+	if !verifier.Verified() {
+		return errors.New("digest mismatch")
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/opencontainers/image-tools/image/doc.go b/vendor/github.com/opencontainers/image-tools/image/doc.go
new file mode 100644
index 0000000..fc5de0f
--- /dev/null
+++ b/vendor/github.com/opencontainers/image-tools/image/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2016 The Linux Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package image defines methods for validating and unpacking OCI images.
+package image
diff --git a/vendor/github.com/opencontainers/image-tools/image/image.go b/vendor/github.com/opencontainers/image-tools/image/image.go
new file mode 100644
index 0000000..e5c0fe1
--- /dev/null
+++ b/vendor/github.com/opencontainers/image-tools/image/image.go
@@ -0,0 +1,388 @@
+// Copyright 2016 The Linux Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package image
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"log"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+)
+
+// ValidateLayout walks through the given file tree and validates the manifest
+// pointed to by the given refs or returns an error if the validation failed.
+func ValidateLayout(src string, refs []string, out *log.Logger) error {
+	return validate(newPathWalker(src), refs, out)
+}
+
+// ValidateZip walks through the given file tree and validates the manifest
+// pointed to by the given refs or returns an error if the validation failed.
+func ValidateZip(src string, refs []string, out *log.Logger) error {
+	return validate(newZipWalker(src), refs, out)
+}
+
+// ValidateFile opens the tar file given by the filename, then calls Validate on it.
+func ValidateFile(tarFile string, refs []string, out *log.Logger) error {
+	f, err := os.Open(tarFile)
+	if err != nil {
+		return errors.Wrap(err, "unable to open file")
+	}
+	defer f.Close()
+
+	return Validate(f, refs, out)
+}
+
+// Validate walks through a tar stream and validates the manifest.
+// * Checks that all refs point to extant blobs
+// * Checks that all referred blobs are valid
+// * Checks that mime-types are correct
+// It returns an error on validation failure.
+func Validate(r io.ReadSeeker, refs []string, out *log.Logger) error {
+	return validate(newTarWalker(r), refs, out)
+}
+
+var validRefMediaTypes = []string{
+	v1.MediaTypeImageManifest,
+	v1.MediaTypeImageIndex,
+}
+
+func validate(w walker, refs []string, out *log.Logger) error {
+	if err := layoutValidate(w); err != nil {
+		return err
+	}
+
+	ds, err := listReferences(w)
+	if err != nil {
+		return err
+	}
+	if len(refs) == 0 && len(ds) == 0 {
+		// TODO(runcom): ugly, we'll need a better way and library
+		// to express log levels.
+		// see https://github.com/opencontainers/image-spec/issues/288
+		out.Print("WARNING: no descriptors found")
+	}
+
+	if len(refs) == 0 {
+		for ref := range ds {
+			refs = append(refs, ref)
+		}
+	}
+
+	for _, ref := range refs {
+		d, ok := ds[ref]
+		if !ok {
+			// TODO(runcom):
+			// soften this error to a warning if the user didn't ask for any specific reference
+			// with --ref but she's just validating the whole image.
+ return fmt.Errorf("reference %s not found", ref) + } + + if err = validateDescriptor(d, w, validRefMediaTypes); err != nil { + return err + } + + if d.MediaType == validRefMediaTypes[0] { + m, err := findManifest(w, d) + if err != nil { + return err + } + + if err := m.validate(w); err != nil { + return err + } + } + + if d.MediaType == validRefMediaTypes[1] { + index, err := findIndex(w, d) + if err != nil { + return err + } + + if err := validateIndex(index, w); err != nil { + return err + } + + if len(index.Manifests) == 0 { + fmt.Println("warning: no manifests found") + return nil + } + + for _, manifest := range index.Manifests { + m, err := findManifest(w, &manifest) + if err != nil { + return err + } + + if err := m.validate(w); err != nil { + return err + } + } + } + + if out != nil { + out.Printf("reference %q: OK", ref) + } + } + + return nil +} + +// UnpackLayout walks through the file tree given by src and, using the layers +// specified in the manifest pointed to by the given ref, unpacks all layers in +// the given destination directory or returns an error if the unpacking failed. +func UnpackLayout(src, dest, ref, platform string) error { + return unpack(newPathWalker(src), dest, ref, platform) +} + +// UnpackZip opens and walks through the zip file given by src and, using the layers +// specified in the manifest pointed to by the given ref, unpacks all layers in +// the given destination directory or returns an error if the unpacking failed. +func UnpackZip(src, dest, ref, platform string) error { + return unpack(newZipWalker(src), dest, ref, platform) +} + +// UnpackFile opens the file pointed by tarFileName and calls Unpack on it. +func UnpackFile(tarFileName, dest, ref, platform string) error { + f, err := os.Open(tarFileName) + if err != nil { + return errors.Wrap(err, "unable to open file") + } + defer f.Close() + + return Unpack(f, dest, ref, platform) +} + +// Unpack walks through the tar stream and, using the layers specified in +// the manifest pointed to by the given ref, unpacks all layers in the given +// destination directory or returns an error if the unpacking failed. +// The destination will be created if it does not exist. +func Unpack(r io.ReadSeeker, dest, refName, platform string) error { + return unpack(newTarWalker(r), dest, refName, platform) +} + +func unpack(w walker, dest, refName, platform string) error { + if err := layoutValidate(w); err != nil { + return err + } + + ref, err := findDescriptor(w, refName) + if err != nil { + return err + } + + if err = validateDescriptor(ref, w, validRefMediaTypes); err != nil { + return err + } + + if ref.MediaType == validRefMediaTypes[0] { + m, err := findManifest(w, ref) + if err != nil { + return err + } + + if err := m.validate(w); err != nil { + return err + } + + return m.unpack(w, dest) + } + + if ref.MediaType == validRefMediaTypes[1] { + index, err := findIndex(w, ref) + if err != nil { + return err + } + + if err = validateIndex(index, w); err != nil { + return err + } + + manifests, err := filterManifest(w, index.Manifests, platform) + if err != nil { + return err + } + + for _, m := range manifests { + return m.unpack(w, dest) + } + } + + return nil +} + +// CreateRuntimeBundleLayout walks through the file tree given by src and +// creates an OCI runtime bundle in the given destination dest +// or returns an error if the unpacking failed. 
+func CreateRuntimeBundleLayout(src, dest, ref, root, platform string) error {
+	return createRuntimeBundle(newPathWalker(src), dest, ref, root, platform)
+}
+
+// CreateRuntimeBundleZip opens and walks through the zip file given by src
+// and creates an OCI runtime bundle in the given destination dest
+// or returns an error if the unpacking failed.
+func CreateRuntimeBundleZip(src, dest, ref, root, platform string) error {
+	return createRuntimeBundle(newZipWalker(src), dest, ref, root, platform)
+}
+
+// CreateRuntimeBundleFile opens the file pointed by tarFile and calls
+// CreateRuntimeBundle.
+func CreateRuntimeBundleFile(tarFile, dest, ref, root, platform string) error {
+	f, err := os.Open(tarFile)
+	if err != nil {
+		return errors.Wrap(err, "unable to open file")
+	}
+	defer f.Close()
+
+	return createRuntimeBundle(newTarWalker(f), dest, ref, root, platform)
+}
+
+// CreateRuntimeBundle walks through the given tar stream and
+// creates an OCI runtime bundle in the given destination dest
+// or returns an error if the unpacking failed.
+func CreateRuntimeBundle(r io.ReadSeeker, dest, ref, root, platform string) error {
+	return createRuntimeBundle(newTarWalker(r), dest, ref, root, platform)
+}
+
+func createRuntimeBundle(w walker, dest, refName, rootfs, platform string) error {
+	if err := layoutValidate(w); err != nil {
+		return err
+	}
+
+	ref, err := findDescriptor(w, refName)
+	if err != nil {
+		return err
+	}
+
+	if err = validateDescriptor(ref, w, validRefMediaTypes); err != nil {
+		return err
+	}
+
+	if ref.MediaType == validRefMediaTypes[0] {
+		m, err := findManifest(w, ref)
+		if err != nil {
+			return err
+		}
+
+		if err := m.validate(w); err != nil {
+			return err
+		}
+
+		return createBundle(w, m, dest, rootfs)
+	}
+
+	if ref.MediaType == validRefMediaTypes[1] {
+		index, err := findIndex(w, ref)
+		if err != nil {
+			return err
+		}
+
+		if err = validateIndex(index, w); err != nil {
+			return err
+		}
+
+		manifests, err := filterManifest(w, index.Manifests, platform)
+		if err != nil {
+			return err
+		}
+
+		for _, m := range manifests {
+			return createBundle(w, m, dest, rootfs)
+		}
+	}
+
+	return nil
+}
+
+func createBundle(w walker, m *manifest, dest, rootfs string) (retErr error) {
+	c, err := findConfig(w, &m.Config)
+	if err != nil {
+		return err
+	}
+
+	if _, err = os.Stat(dest); err != nil {
+		if os.IsNotExist(err) {
+			if err2 := os.MkdirAll(dest, 0755); err2 != nil {
+				return err2
+			}
+			defer func() {
+				if retErr != nil {
+					if err3 := os.RemoveAll(dest); err3 != nil {
+						fmt.Printf("Failed to clean up %q: %s\n", dest, err3.Error())
+					}
+				}
+			}()
+		} else {
+			return err
+		}
+	}
+
+	if err = m.unpack(w, filepath.Join(dest, rootfs)); err != nil {
+		return err
+	}
+
+	spec, err := c.runtimeSpec(rootfs)
+	if err != nil {
+		return err
+	}
+
+	f, err := os.Create(filepath.Join(dest, "config.json"))
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	return json.NewEncoder(f).Encode(spec)
+}
+
+// filterManifest returns the manifests matching the given platform ("os:arch").
+func filterManifest(w walker, Manifests []v1.Descriptor, platform string) ([]*manifest, error) {
+	var manifests []*manifest
+
+	argsParts := strings.Split(platform, ":")
+	if len(argsParts) != 2 {
+		return manifests, fmt.Errorf("platform must have os and arch when reftype is index")
+	}
+
+	if len(Manifests) == 0 {
+		fmt.Println("warning: no manifests found")
+		return manifests, nil
+	}
+
+	for _, manifest := range Manifests {
+		m, err := findManifest(w, &manifest)
+		if err != nil {
+			return manifests, err
+		}
+
+		if err := 
m.validate(w); err != nil { + return manifests, err + } + if strings.EqualFold(manifest.Platform.OS, argsParts[0]) && strings.EqualFold(manifest.Platform.Architecture, argsParts[1]) { + manifests = append(manifests, m) + } + } + + if len(manifests) == 0 { + return manifests, fmt.Errorf("There is no matching manifest") + } + + return manifests, nil +} diff --git a/vendor/github.com/opencontainers/image-tools/image/index.go b/vendor/github.com/opencontainers/image-tools/image/index.go new file mode 100644 index 0000000..f0c8d61 --- /dev/null +++ b/vendor/github.com/opencontainers/image-tools/image/index.go @@ -0,0 +1,71 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package image + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + + "github.com/opencontainers/image-spec/schema" + "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +func findIndex(w walker, d *v1.Descriptor) (*v1.Index, error) { + var index v1.Index + ipath := filepath.Join("blobs", string(d.Digest.Algorithm()), d.Digest.Hex()) + + switch err := w.walk(func(path string, info os.FileInfo, r io.Reader) error { + if info.IsDir() || filepath.Clean(path) != ipath { + return nil + } + + buf, err := ioutil.ReadAll(r) + if err != nil { + return errors.Wrapf(err, "%s: error reading index", path) + } + + if err := schema.ValidatorMediaTypeImageIndex.Validate(bytes.NewReader(buf)); err != nil { + return errors.Wrapf(err, "%s: index validation failed", path) + } + + if err := json.Unmarshal(buf, &index); err != nil { + return err + } + + return errEOW + }); err { + case errEOW: + return &index, nil + case nil: + return nil, fmt.Errorf("index not found") + default: + return nil, err + } +} + +func validateIndex(index *v1.Index, w walker) error { + for _, manifest := range index.Manifests { + if err := validateDescriptor(&manifest, w, []string{v1.MediaTypeImageManifest}); err != nil { + return errors.Wrap(err, "manifest validation failed") + } + } + return nil +} diff --git a/vendor/github.com/opencontainers/image-tools/image/layout.go b/vendor/github.com/opencontainers/image-tools/image/layout.go new file mode 100644 index 0000000..dfa395d --- /dev/null +++ b/vendor/github.com/opencontainers/image-tools/image/layout.go @@ -0,0 +1,105 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
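
A quick sketch of the exported unpack and bundle entry points defined above (paths and ref are hypothetical; `platform` uses the `os:arch` form that `filterManifest` expects, and is only consulted when the ref points at an image index):

```
package main

import (
	"log"

	"github.com/opencontainers/image-tools/image"
)

func main() {
	// Unpack the rootfs of the "latest" reference from an image layout directory.
	if err := image.UnpackLayout("busybox-oci", "busybox-rootfs", "latest", "linux:amd64"); err != nil {
		log.Fatalf("unpack failed: %v", err)
	}

	// Or build a full runtime bundle (rootfs plus a generated config.json).
	if err := image.CreateRuntimeBundleLayout("busybox-oci", "busybox-bundle", "latest", "rootfs", "linux:amd64"); err != nil {
		log.Fatalf("bundle creation failed: %v", err)
	}
}
```
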
+ +package image + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + + "github.com/opencontainers/image-spec/schema" + "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +func layoutValidate(w walker) error { + var blobsExist, indexExist, layoutExist bool + + if err := w.walk(func(path string, info os.FileInfo, r io.Reader) error { + if strings.EqualFold(filepath.Base(path), "blobs") { + blobsExist = true + if !info.IsDir() { + return fmt.Errorf("blobs is not a directory") + } + + return nil + } + + if strings.EqualFold(filepath.Base(path), "index.json") { + indexExist = true + if info.IsDir() { + return fmt.Errorf("index.json is a directory") + } + + var index v1.Index + buf, err := ioutil.ReadAll(r) + if err != nil { + return errors.Wrap(err, "error reading index.json") + } + + if err := json.Unmarshal(buf, &index); err != nil { + return errors.Wrap(err, "index.json format mismatch") + } + + return nil + } + + if strings.EqualFold(filepath.Base(path), "oci-layout") { + layoutExist = true + if info.IsDir() { + return fmt.Errorf("oci-layout is a directory") + } + + var imageLayout v1.ImageLayout + buf, err := ioutil.ReadAll(r) + if err != nil { + return errors.Wrap(err, "error reading oci-layout") + } + + if err := schema.ValidatorMediaTypeLayoutHeader.Validate(bytes.NewReader(buf)); err != nil { + return errors.Wrap(err, "oci-layout: imageLayout validation failed") + } + + if err := json.Unmarshal(buf, &imageLayout); err != nil { + return errors.Wrap(err, "oci-layout format mismatch") + } + + return nil + } + + return nil + }); err != nil { + return err + } + + if !blobsExist { + return fmt.Errorf("image layout must contain blobs directory") + } + + if !indexExist { + return fmt.Errorf("image layout must contain index.json file") + } + + if !layoutExist { + return fmt.Errorf("image layout must contain oci-layout file") + } + + return nil +} diff --git a/vendor/github.com/opencontainers/image-tools/image/manifest.go b/vendor/github.com/opencontainers/image-tools/image/manifest.go new file mode 100644 index 0000000..52617e5 --- /dev/null +++ b/vendor/github.com/opencontainers/image-tools/image/manifest.go @@ -0,0 +1,317 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
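
In other words, `layoutValidate` insists on exactly the three entries the image-layout spec mandates: a `blobs` directory, an `index.json`, and an `oci-layout` file. A minimal sketch of scaffolding such a layout with the vendored types (the directory name is hypothetical):

```
package main

import (
	"encoding/json"
	"io/ioutil"
	"log"
	"os"
	"path/filepath"

	specs "github.com/opencontainers/image-spec/specs-go"
	v1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	root := "minimal-oci" // hypothetical layout root

	// blobs/ holds content-addressed data, keyed by digest algorithm.
	if err := os.MkdirAll(filepath.Join(root, "blobs", "sha256"), 0755); err != nil {
		log.Fatal(err)
	}

	// oci-layout records the layout version.
	layout, _ := json.Marshal(v1.ImageLayout{Version: v1.ImageLayoutVersion})
	if err := ioutil.WriteFile(filepath.Join(root, v1.ImageLayoutFile), layout, 0644); err != nil {
		log.Fatal(err)
	}

	// index.json is the layout's entry point; the manifest list is empty here.
	idx, _ := json.Marshal(v1.Index{Versioned: specs.Versioned{SchemaVersion: 2}})
	if err := ioutil.WriteFile(filepath.Join(root, "index.json"), idx, 0644); err != nil {
		log.Fatal(err)
	}
}
```
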
+ +package image + +import ( + "archive/tar" + "bufio" + "bytes" + "compress/bzip2" + "compress/gzip" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "time" + + "github.com/sirupsen/logrus" + "github.com/opencontainers/image-spec/schema" + "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +type manifest struct { + Config v1.Descriptor `json:"config"` + Layers []v1.Descriptor `json:"layers"` +} + +func findManifest(w walker, d *v1.Descriptor) (*manifest, error) { + var m manifest + mpath := filepath.Join("blobs", string(d.Digest.Algorithm()), d.Digest.Hex()) + + switch err := w.walk(func(path string, info os.FileInfo, r io.Reader) error { + if info.IsDir() || filepath.Clean(path) != mpath { + return nil + } + + buf, err := ioutil.ReadAll(r) + if err != nil { + return errors.Wrapf(err, "%s: error reading manifest", path) + } + + if err := schema.ValidatorMediaTypeManifest.Validate(bytes.NewReader(buf)); err != nil { + return errors.Wrapf(err, "%s: manifest validation failed", path) + } + + if err := json.Unmarshal(buf, &m); err != nil { + return err + } + + return errEOW + }); err { + case nil: + return nil, fmt.Errorf("%s: manifest not found", mpath) + case errEOW: + return &m, nil + default: + return nil, err + } +} + +func (m *manifest) validate(w walker) error { + if err := validateDescriptor(&m.Config, w, []string{v1.MediaTypeImageConfig}); err != nil { + return errors.Wrap(err, "config validation failed") + } + + validLayerMediaTypes := []string{ + v1.MediaTypeImageLayer, + v1.MediaTypeImageLayerGzip, + v1.MediaTypeImageLayerNonDistributable, + v1.MediaTypeImageLayerNonDistributableGzip, + } + + for _, d := range m.Layers { + if err := validateDescriptor(&d, w, validLayerMediaTypes); err != nil { + return errors.Wrap(err, "layer validation failed") + } + } + + return nil +} + +func (m *manifest) unpack(w walker, dest string) (retErr error) { + // error out if the dest directory is not empty + s, err := ioutil.ReadDir(dest) + if err != nil && !os.IsNotExist(err) { // We'll create the dir later + return errors.Wrap(err, "unpack: unable to open dest") // err contains dest + } + if len(s) > 0 { + return fmt.Errorf("%s is not empty", dest) + } + defer func() { + // if we encounter error during unpacking + // clean up the partially-unpacked destination + if retErr != nil { + if err := os.RemoveAll(dest); err != nil { + fmt.Printf("Error: failed to remove partially-unpacked destination %v", err) + } + } + }() + for _, d := range m.Layers { + switch err := w.walk(func(path string, info os.FileInfo, r io.Reader) error { + if info.IsDir() { + return nil + } + + dd, err := filepath.Rel(filepath.Join("blobs", string(d.Digest.Algorithm())), filepath.Clean(path)) + if err != nil || d.Digest.Hex() != dd { + return nil + } + + if err := unpackLayer(d.MediaType, path, dest, r); err != nil { + return errors.Wrap(err, "error unpack: extracting layer") + } + + return errEOW + }); err { + case nil: + return fmt.Errorf("%s: layer not found", dest) + case errEOW: + default: + return err + } + } + return nil +} + +func getReader(path, mediaType, comp string, buf io.Reader) (io.Reader, error) { + switch comp { + case "gzip": + if !strings.HasSuffix(mediaType, "+gzip") { + logrus.Debugf("%q: %s media type with non-%s file", path, comp, comp) + } + + return gzip.NewReader(buf) + case "bzip2": + if !strings.HasSuffix(mediaType, "+bzip2") { + logrus.Debugf("%q: %s media type with non-%s file", path, comp, comp) + } + + return 
bzip2.NewReader(buf), nil + case "xz": + return nil, errors.New("xz layers are not supported") + default: + if strings.Contains(mediaType, "+") { + logrus.Debugf("%q: %s media type with non-%s file", path, comp, comp) + } + + return buf, nil + } +} + +// DetectCompression detects the compression algorithm of the source. +func DetectCompression(r *bufio.Reader) (string, error) { + source, err := r.Peek(10) + if err != nil { + return "", err + } + + for compression, m := range map[string][]byte{ + "bzip2": {0x42, 0x5A, 0x68}, + "gzip": {0x1F, 0x8B, 0x08}, + // FIXME needs decompression support + // "xz": {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, + } { + if len(source) < len(m) { + logrus.Debug("Len too short") + continue + } + if bytes.Equal(m, source[:len(m)]) { + return compression, nil + } + } + return "plain", nil +} + +func unpackLayer(mediaType, path, dest string, r io.Reader) error { + entries := make(map[string]bool) + + buf := bufio.NewReader(r) + + comp, err := DetectCompression(buf) + if err != nil { + return err + } + + reader, err := getReader(path, mediaType, comp, buf) + if err != nil { + return err + } + + var dirs []*tar.Header + tr := tar.NewReader(reader) + +loop: + for { + hdr, err := tr.Next() + switch err { + case io.EOF: + break loop + case nil: + // success, continue below + default: + return errors.Wrapf(err, "error advancing tar stream") + } + + hdr.Name = filepath.Clean(hdr.Name) + if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { + // Not the root directory, ensure that the parent directory exists + parent := filepath.Dir(hdr.Name) + parentPath := filepath.Join(dest, parent) + if _, err2 := os.Lstat(parentPath); err2 != nil && os.IsNotExist(err2) { + if err3 := os.MkdirAll(parentPath, 0755); err3 != nil { + return err3 + } + } + } + path := filepath.Join(dest, hdr.Name) + if entries[path] { + return fmt.Errorf("duplicate entry for %s", path) + } + entries[path] = true + rel, err := filepath.Rel(dest, path) + if err != nil { + return err + } + info := hdr.FileInfo() + if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { + return fmt.Errorf("%q is outside of %q", hdr.Name, dest) + } + + if strings.HasPrefix(info.Name(), ".wh.") { + path = strings.Replace(path, ".wh.", "", 1) + + if err := os.RemoveAll(path); err != nil { + return errors.Wrap(err, "unable to delete whiteout path") + } + + continue loop + } + + switch hdr.Typeflag { + case tar.TypeDir: + if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) { + if err2 := os.MkdirAll(path, info.Mode()); err2 != nil { + return errors.Wrap(err2, "error creating directory") + } + } + + case tar.TypeReg, tar.TypeRegA: + f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, info.Mode()) + if err != nil { + return errors.Wrap(err, "unable to open file") + } + + if _, err := io.Copy(f, tr); err != nil { + f.Close() + return errors.Wrap(err, "unable to copy") + } + f.Close() + + case tar.TypeLink: + target := filepath.Join(dest, hdr.Linkname) + + if !strings.HasPrefix(target, dest) { + return fmt.Errorf("invalid hardlink %q -> %q", target, hdr.Linkname) + } + + if err := os.Link(target, path); err != nil { + return err + } + + case tar.TypeSymlink: + target := filepath.Join(filepath.Dir(path), hdr.Linkname) + + if !strings.HasPrefix(target, dest) { + return fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname) + } + + if err := os.Symlink(hdr.Linkname, path); err != nil { + return err + } + case tar.TypeXGlobalHeader: + return nil + } + // Directory mtimes must be handled at the end to avoid further + // 
file creation in them to modify the directory mtime + if hdr.Typeflag == tar.TypeDir { + dirs = append(dirs, hdr) + } + } + for _, hdr := range dirs { + path := filepath.Join(dest, hdr.Name) + + finfo := hdr.FileInfo() + // I believe the old version was using time.Now().UTC() to overcome an + // invalid error from chtimes.....but here we lose hdr.AccessTime like this... + if err := os.Chtimes(path, time.Now().UTC(), finfo.ModTime()); err != nil { + return errors.Wrap(err, "error changing time") + } + } + return nil +} diff --git a/vendor/github.com/opencontainers/image-tools/image/project.go b/vendor/github.com/opencontainers/image-tools/image/project.go new file mode 100644 index 0000000..b87186f --- /dev/null +++ b/vendor/github.com/opencontainers/image-tools/image/project.go @@ -0,0 +1,21 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package image + +// SpecURL is the URL for the image-spec repository +var SpecURL = "https://github.com/opencontainers/image-spec" + +// IssuesURL is the URL for the issues of image-tools +var IssuesURL = "https://github.com/opencontainers/image-tools/issues" diff --git a/vendor/github.com/opencontainers/image-tools/image/walker.go b/vendor/github.com/opencontainers/image-tools/image/walker.go new file mode 100644 index 0000000..04a77f0 --- /dev/null +++ b/vendor/github.com/opencontainers/image-tools/image/walker.go @@ -0,0 +1,251 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package image + +import ( + "archive/tar" + "archive/zip" + "fmt" + "io" + "os" + "path/filepath" + "sync" + + "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +var ( + errEOW = fmt.Errorf("end of walk") // error to signal stop walking +) + +// walkFunc is a function type that gets called for each file or directory visited by the Walker. +type walkFunc func(path string, _ os.FileInfo, _ io.Reader) error + +// walker is the interface that defines how to access a given archival format +type walker interface { + + // walk calls walkfunc for every entity in the archive + walk(walkFunc) error + + // get will copy an arbitrary blob, defined by desc, in to dst. returns + // the number of bytes copied on success. + get(desc v1.Descriptor, dst io.Writer) (int64, error) +} + +// tarWalker exposes access to image layouts in a tar file. 
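+// An editorial sketch, not part of image-tools: given any walker w built from
+// this file and a v1.Descriptor desc, get() resolves the blob stored at
+// "blobs/<algorithm>/<hex-digest>" inside the layout and returns
+// os.ErrNotExist when nothing matches, so reading a manifest's config blob
+// looks roughly like:
+//
+//	var buf bytes.Buffer
+//	if _, err := w.get(m.Config, &buf); err != nil {
+//		// handle os.ErrNotExist or a copy failure
+//	}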
+type tarWalker struct {
+	r io.ReadSeeker
+
+	// Synchronize use of the reader
+	mut sync.Mutex
+}
+
+// newTarWalker returns a Walker that walks through .tar files.
+func newTarWalker(r io.ReadSeeker) walker {
+	return &tarWalker{r: r}
+}
+
+func (w *tarWalker) walk(f walkFunc) error {
+	w.mut.Lock()
+	defer w.mut.Unlock()
+
+	if _, err := w.r.Seek(0, io.SeekStart); err != nil {
+		return errors.Wrapf(err, "unable to reset")
+	}
+
+	tr := tar.NewReader(w.r)
+
+loop:
+	for {
+		hdr, err := tr.Next()
+		switch err {
+		case io.EOF:
+			break loop
+		case nil:
+			// success, continue below
+		default:
+			return errors.Wrapf(err, "error advancing tar stream")
+		}
+
+		info := hdr.FileInfo()
+		if err := f(hdr.Name, info, tr); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (w *tarWalker) get(desc v1.Descriptor, dst io.Writer) (int64, error) {
+	var bytes int64
+	done := false
+
+	expectedPath := filepath.Join("blobs", string(desc.Digest.Algorithm()), desc.Digest.Hex())
+
+	f := func(path string, info os.FileInfo, rdr io.Reader) error {
+		var err error
+		if done {
+			return nil
+		}
+
+		if filepath.Clean(path) == expectedPath && !info.IsDir() {
+			if bytes, err = io.Copy(dst, rdr); err != nil {
+				return errors.Wrapf(err, "get failed: failed to copy blob to destination")
+			}
+			done = true
+		}
+		return nil
+	}
+
+	if err := w.walk(f); err != nil {
+		return 0, errors.Wrapf(err, "get failed: unable to walk")
+	}
+	if !done {
+		return 0, os.ErrNotExist
+	}
+
+	return bytes, nil
+}
+
+type eofReader struct{}
+
+func (eofReader) Read(_ []byte) (int, error) {
+	return 0, io.EOF
+}
+
+type pathWalker struct {
+	root string
+}
+
+// newPathWalker returns a Walker that walks through directories
+// starting at the given root path. It does not follow symlinks.
+func newPathWalker(root string) walker {
+	return &pathWalker{root}
+}
+
+func (w *pathWalker) walk(f walkFunc) error {
+	return filepath.Walk(w.root, func(path string, info os.FileInfo, err error) error {
+		// We MUST check the error value: when it is non-nil, the os.FileInfo
+		// may be nil, and dereferencing it below would panic.
+		if err != nil {
+			return errors.Wrap(err, "error walking path")
+		}
+
+		rel, err := filepath.Rel(w.root, path)
+		if err != nil {
+			return errors.Wrap(err, "error walking path") // err from filepath.Walk includes path name
+		}
+
+		if info.IsDir() { // behave like a tar reader for directories
+			return f(rel, info, eofReader{})
+		}
+
+		file, err := os.Open(path)
+		if err != nil {
+			return errors.Wrap(err, "unable to open file") // os.Open includes the path
+		}
+		defer file.Close()
+
+		return f(rel, info, file)
+	})
+}
+
+func (w *pathWalker) get(desc v1.Descriptor, dst io.Writer) (int64, error) {
+	name := filepath.Join(w.root, "blobs", string(desc.Digest.Algorithm()), desc.Digest.Hex())
+
+	info, err := os.Stat(name)
+	if err != nil {
+		return 0, err
+	}
+
+	if info.IsDir() {
+		return 0, fmt.Errorf("object is dir")
+	}
+
+	fp, err := os.Open(name)
+	if err != nil {
+		return 0, errors.Wrapf(err, "get failed")
+	}
+	defer fp.Close()
+
+	nbytes, err := io.Copy(dst, fp)
+	if err != nil {
+		return 0, errors.Wrapf(err, "get failed: failed to copy blob to destination")
+	}
+	return nbytes, nil
+}
+
+type zipWalker struct {
+	fileName string
+}
+
+// newZipWalker returns a Walker that walks through .zip files.
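+// An editorial sketch (walkerFor is a hypothetical helper, not part of
+// image-tools) of how a caller might choose between the three walkers
+// defined in this file:
+//
+//	func walkerFor(path string, f *os.File, isDir bool) walker {
+//		switch {
+//		case isDir:
+//			return newPathWalker(path) // image layout unpacked on disk
+//		case strings.HasSuffix(path, ".zip"):
+//			return newZipWalker(path) // zip archives are opened by name
+//		default:
+//			return newTarWalker(f) // tar archives need an io.ReadSeeker
+//		}
+//	}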
+func newZipWalker(fileName string) walker { + return &zipWalker{fileName} +} + +func (w *zipWalker) walk(f walkFunc) error { + r, err := zip.OpenReader(w.fileName) + if err != nil { + return err + } + defer r.Close() + + for _, file := range r.File { + rc, err := file.Open() + if err != nil { + return err + } + defer rc.Close() + info := file.FileInfo() + if err := f(file.Name, info, rc); err != nil { + return err + } + } + + return nil +} + +func (w *zipWalker) get(desc v1.Descriptor, dst io.Writer) (int64, error) { + var bytes int64 + done := false + + expectedPath := filepath.Join("blobs", string(desc.Digest.Algorithm()), desc.Digest.Hex()) + + f := func(path string, info os.FileInfo, rdr io.Reader) error { + var err error + if done { + return nil + } + + if path == expectedPath && !info.IsDir() { + if bytes, err = io.Copy(dst, rdr); err != nil { + return errors.Wrapf(err, "get failed: failed to copy blob to destination") + } + done = true + } + return nil + } + + if err := w.walk(f); err != nil { + return 0, errors.Wrapf(err, "get failed: unable to walk") + } + if !done { + return 0, os.ErrNotExist + } + + return bytes, nil +} diff --git a/vendor/github.com/opencontainers/runc/LICENSE b/vendor/github.com/opencontainers/runc/LICENSE new file mode 100644 index 0000000..2744858 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2014 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/opencontainers/runc/NOTICE b/vendor/github.com/opencontainers/runc/NOTICE new file mode 100644 index 0000000..5c97abc --- /dev/null +++ b/vendor/github.com/opencontainers/runc/NOTICE @@ -0,0 +1,17 @@ +runc + +Copyright 2012-2015 Docker, Inc. + +This product includes software developed at Docker, Inc. (http://www.docker.com). + +The following is courtesy of our legal counsel: + + +Use and transfer of Docker may be subject to certain restrictions by the +United States and other governments. +It is your responsibility to ensure that your use and/or transfer does not +violate applicable laws. + +For more information, please see http://www.bis.doc.gov + +See also http://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/vendor/github.com/opencontainers/runc/README.md b/vendor/github.com/opencontainers/runc/README.md new file mode 100644 index 0000000..e755fb7 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/README.md @@ -0,0 +1,270 @@ +# runc + +[![Build Status](https://travis-ci.org/opencontainers/runc.svg?branch=master)](https://travis-ci.org/opencontainers/runc) +[![Go Report Card](https://goreportcard.com/badge/github.com/opencontainers/runc)](https://goreportcard.com/report/github.com/opencontainers/runc) +[![GoDoc](https://godoc.org/github.com/opencontainers/runc?status.svg)](https://godoc.org/github.com/opencontainers/runc) + +## Introduction + +`runc` is a CLI tool for spawning and running containers according to the OCI specification. 
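+
+As a quick orientation (a sketch only; the full workflow, including how to
+build the bundle, is covered under "Using runc" below), running a container
+from an existing OCI bundle is a single command:
+
+```bash
+# assumes /mycontainer already contains a rootfs and a config.json (see below)
+cd /mycontainer
+runc run mycontainerid
+```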
+ +## Releases + +`runc` depends on and tracks the [runtime-spec](https://github.com/opencontainers/runtime-spec) repository. +We will try to make sure that `runc` and the OCI specification major versions stay in lockstep. +This means that `runc` 1.0.0 should implement the 1.0 version of the specification. + +You can find official releases of `runc` on the [release](https://github.com/opencontainers/runc/releases) page. + +### Security + +If you wish to report a security issue, please disclose the issue responsibly +to security@opencontainers.org. + +## Building + +`runc` currently supports the Linux platform with various architecture support. +It must be built with Go version 1.6 or higher in order for some features to function properly. + +In order to enable seccomp support you will need to install `libseccomp` on your platform. +> e.g. `libseccomp-devel` for CentOS, or `libseccomp-dev` for Ubuntu + +Otherwise, if you do not want to build `runc` with seccomp support you can add `BUILDTAGS=""` when running make. + +```bash +# create a 'github.com/opencontainers' in your GOPATH/src +cd github.com/opencontainers +git clone https://github.com/opencontainers/runc +cd runc + +make +sudo make install +``` + +You can also use `go get` to install to your `GOPATH`, assuming that you have a `github.com` parent folder already created under `src`: + +```bash +go get github.com/opencontainers/runc +cd $GOPATH/src/github.com/opencontainers/runc +make +sudo make install +``` + +`runc` will be installed to `/usr/local/sbin/runc` on your system. + + +#### Build Tags + +`runc` supports optional build tags for compiling support of various features. +To add build tags to the make option the `BUILDTAGS` variable must be set. + +```bash +make BUILDTAGS='seccomp apparmor' +``` + +| Build Tag | Feature | Dependency | +|-----------|------------------------------------|-------------| +| seccomp | Syscall filtering | libseccomp | +| selinux | selinux process and mount labeling | | +| apparmor | apparmor profile support | | +| ambient | ambient capability support | kernel 4.3 | +| nokmem | disable kernel memory account | | + + +### Running the test suite + +`runc` currently supports running its test suite via Docker. +To run the suite just type `make test`. + +```bash +make test +``` + +There are additional make targets for running the tests outside of a container but this is not recommended as the tests are written with the expectation that they can write and remove anywhere. + +You can run a specific test case by setting the `TESTFLAGS` variable. + +```bash +# make test TESTFLAGS="-run=SomeTestFunction" +``` + +You can run a specific integration test by setting the `TESTPATH` variable. + +```bash +# make test TESTPATH="/checkpoint.bats" +``` + +You can run a test in your proxy environment by setting `DOCKER_BUILD_PROXY` and `DOCKER_RUN_PROXY` variables. + +```bash +# make test DOCKER_BUILD_PROXY="--build-arg HTTP_PROXY=http://yourproxy/" DOCKER_RUN_PROXY="-e HTTP_PROXY=http://yourproxy/" +``` + +### Dependencies Management + +`runc` uses [vndr](https://github.com/LK4D4/vndr) for dependencies management. +Please refer to [vndr](https://github.com/LK4D4/vndr) for how to add or update +new dependencies. + +## Using runc + +### Creating an OCI Bundle + +In order to use runc you must have your container in the format of an OCI bundle. +If you have Docker installed you can use its `export` method to acquire a root filesystem from an existing Docker container. 
+ +```bash +# create the top most bundle directory +mkdir /mycontainer +cd /mycontainer + +# create the rootfs directory +mkdir rootfs + +# export busybox via Docker into the rootfs directory +docker export $(docker create busybox) | tar -C rootfs -xvf - +``` + +After a root filesystem is populated you just generate a spec in the format of a `config.json` file inside your bundle. +`runc` provides a `spec` command to generate a base template spec that you are then able to edit. +To find features and documentation for fields in the spec please refer to the [specs](https://github.com/opencontainers/runtime-spec) repository. + +```bash +runc spec +``` + +### Running Containers + +Assuming you have an OCI bundle from the previous step you can execute the container in two different ways. + +The first way is to use the convenience command `run` that will handle creating, starting, and deleting the container after it exits. + +```bash +# run as root +cd /mycontainer +runc run mycontainerid +``` + +If you used the unmodified `runc spec` template this should give you a `sh` session inside the container. + +The second way to start a container is using the specs lifecycle operations. +This gives you more power over how the container is created and managed while it is running. +This will also launch the container in the background so you will have to edit the `config.json` to remove the `terminal` setting for the simple examples here. +Your process field in the `config.json` should look like this below with `"terminal": false` and `"args": ["sleep", "5"]`. + + +```json + "process": { + "terminal": false, + "user": { + "uid": 0, + "gid": 0 + }, + "args": [ + "sleep", "5" + ], + "env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "TERM=xterm" + ], + "cwd": "/", + "capabilities": { + "bounding": [ + "CAP_AUDIT_WRITE", + "CAP_KILL", + "CAP_NET_BIND_SERVICE" + ], + "effective": [ + "CAP_AUDIT_WRITE", + "CAP_KILL", + "CAP_NET_BIND_SERVICE" + ], + "inheritable": [ + "CAP_AUDIT_WRITE", + "CAP_KILL", + "CAP_NET_BIND_SERVICE" + ], + "permitted": [ + "CAP_AUDIT_WRITE", + "CAP_KILL", + "CAP_NET_BIND_SERVICE" + ], + "ambient": [ + "CAP_AUDIT_WRITE", + "CAP_KILL", + "CAP_NET_BIND_SERVICE" + ] + }, + "rlimits": [ + { + "type": "RLIMIT_NOFILE", + "hard": 1024, + "soft": 1024 + } + ], + "noNewPrivileges": true + }, +``` + +Now we can go through the lifecycle operations in your shell. + + +```bash +# run as root +cd /mycontainer +runc create mycontainerid + +# view the container is created and in the "created" state +runc list + +# start the process inside the container +runc start mycontainerid + +# after 5 seconds view that the container has exited and is now in the stopped state +runc list + +# now delete the container +runc delete mycontainerid +``` + +This allows higher level systems to augment the containers creation logic with setup of various settings after the container is created and/or before it is deleted. For example, the container's network stack is commonly set up after `create` but before `start`. + +#### Rootless containers +`runc` has the ability to run containers without root privileges. This is called `rootless`. You need to pass some parameters to `runc` in order to run rootless containers. See below and compare with the previous version. 
Run the following commands as an ordinary user:
+```bash
+# Same as the first example
+mkdir ~/mycontainer
+cd ~/mycontainer
+mkdir rootfs
+docker export $(docker create busybox) | tar -C rootfs -xvf -
+
+# The --rootless parameter instructs runc spec to generate a configuration for a rootless container, which will allow you to run the container as a non-root user.
+runc spec --rootless
+
+# The --root parameter tells runc where to store the container state. It must be writable by the user.
+runc --root /tmp/runc run mycontainerid
+```
+
+#### Supervisors
+
+`runc` can be used with process supervisors and init systems to ensure that containers are restarted when they exit.
+An example systemd unit file looks something like this.
+
+```systemd
+[Unit]
+Description=Start My Container
+
+[Service]
+Type=forking
+ExecStart=/usr/local/sbin/runc run -d --pid-file /run/mycontainerid.pid mycontainerid
+ExecStopPost=/usr/local/sbin/runc delete mycontainerid
+WorkingDirectory=/mycontainer
+PIDFile=/run/mycontainerid.pid
+
+[Install]
+WantedBy=multi-user.target
+```
+
+## License
+
+The code and docs are released under the [Apache 2.0 license](LICENSE).
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/README.md b/vendor/github.com/opencontainers/runc/libcontainer/README.md
new file mode 100644
index 0000000..1d7fa04
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/README.md
@@ -0,0 +1,330 @@
+# libcontainer
+
+[![GoDoc](https://godoc.org/github.com/opencontainers/runc/libcontainer?status.svg)](https://godoc.org/github.com/opencontainers/runc/libcontainer)
+
+Libcontainer provides a native Go implementation for creating containers
+with namespaces, cgroups, capabilities, and filesystem access controls.
+It allows you to manage the lifecycle of the container, performing additional
+operations after the container is created.
+
+
+#### Container
+A container is a self-contained execution environment that shares the kernel of the
+host system and which is (optionally) isolated from other containers in the system.
+
+#### Using libcontainer
+
+Because containers are spawned in a two-step process, you will need a binary that
+will be executed as the init process for the container. In libcontainer, we re-execute
+the current binary (/proc/self/exe) as the init process, passing it the argument
+"init". We call this first step "bootstrap", so you always need an "init"
+entry point to serve as the bootstrap stage.
+
+In addition to the Go init function, the early-stage bootstrap is handled by importing
+[nsenter](https://github.com/opencontainers/runc/blob/master/libcontainer/nsenter/README.md).
+
+```go
+import (
+	_ "github.com/opencontainers/runc/libcontainer/nsenter"
+)
+
+func init() {
+	if len(os.Args) > 1 && os.Args[1] == "init" {
+		runtime.GOMAXPROCS(1)
+		runtime.LockOSThread()
+		factory, _ := libcontainer.New("")
+		if err := factory.StartInitialization(); err != nil {
+			logrus.Fatal(err)
+		}
+		panic("--this line should have never been executed, congratulations--")
+	}
+}
+```
+
+To create a container, you first initialize an instance of a factory
+that will handle the creation and initialization of that container.
+
+```go
+factory, err := libcontainer.New("/var/lib/container", libcontainer.Cgroupfs, libcontainer.InitArgs(os.Args[0], "init"))
+if err != nil {
+	logrus.Fatal(err)
+	return
+}
+```
+
+Once you have created an instance of the factory, you can build a configuration
+struct describing how the container is to be created.
A sample would look similar to this: + +```go +defaultMountFlags := unix.MS_NOEXEC | unix.MS_NOSUID | unix.MS_NODEV +config := &configs.Config{ + Rootfs: "/your/path/to/rootfs", + Capabilities: &configs.Capabilities{ + Bounding: []string{ + "CAP_CHOWN", + "CAP_DAC_OVERRIDE", + "CAP_FSETID", + "CAP_FOWNER", + "CAP_MKNOD", + "CAP_NET_RAW", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETFCAP", + "CAP_SETPCAP", + "CAP_NET_BIND_SERVICE", + "CAP_SYS_CHROOT", + "CAP_KILL", + "CAP_AUDIT_WRITE", + }, + Effective: []string{ + "CAP_CHOWN", + "CAP_DAC_OVERRIDE", + "CAP_FSETID", + "CAP_FOWNER", + "CAP_MKNOD", + "CAP_NET_RAW", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETFCAP", + "CAP_SETPCAP", + "CAP_NET_BIND_SERVICE", + "CAP_SYS_CHROOT", + "CAP_KILL", + "CAP_AUDIT_WRITE", + }, + Inheritable: []string{ + "CAP_CHOWN", + "CAP_DAC_OVERRIDE", + "CAP_FSETID", + "CAP_FOWNER", + "CAP_MKNOD", + "CAP_NET_RAW", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETFCAP", + "CAP_SETPCAP", + "CAP_NET_BIND_SERVICE", + "CAP_SYS_CHROOT", + "CAP_KILL", + "CAP_AUDIT_WRITE", + }, + Permitted: []string{ + "CAP_CHOWN", + "CAP_DAC_OVERRIDE", + "CAP_FSETID", + "CAP_FOWNER", + "CAP_MKNOD", + "CAP_NET_RAW", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETFCAP", + "CAP_SETPCAP", + "CAP_NET_BIND_SERVICE", + "CAP_SYS_CHROOT", + "CAP_KILL", + "CAP_AUDIT_WRITE", + }, + Ambient: []string{ + "CAP_CHOWN", + "CAP_DAC_OVERRIDE", + "CAP_FSETID", + "CAP_FOWNER", + "CAP_MKNOD", + "CAP_NET_RAW", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETFCAP", + "CAP_SETPCAP", + "CAP_NET_BIND_SERVICE", + "CAP_SYS_CHROOT", + "CAP_KILL", + "CAP_AUDIT_WRITE", + }, + }, + Namespaces: configs.Namespaces([]configs.Namespace{ + {Type: configs.NEWNS}, + {Type: configs.NEWUTS}, + {Type: configs.NEWIPC}, + {Type: configs.NEWPID}, + {Type: configs.NEWUSER}, + {Type: configs.NEWNET}, + {Type: configs.NEWCGROUP}, + }), + Cgroups: &configs.Cgroup{ + Name: "test-container", + Parent: "system", + Resources: &configs.Resources{ + MemorySwappiness: nil, + AllowAllDevices: nil, + AllowedDevices: configs.DefaultAllowedDevices, + }, + }, + MaskPaths: []string{ + "/proc/kcore", + "/sys/firmware", + }, + ReadonlyPaths: []string{ + "/proc/sys", "/proc/sysrq-trigger", "/proc/irq", "/proc/bus", + }, + Devices: configs.DefaultAutoCreatedDevices, + Hostname: "testing", + Mounts: []*configs.Mount{ + { + Source: "proc", + Destination: "/proc", + Device: "proc", + Flags: defaultMountFlags, + }, + { + Source: "tmpfs", + Destination: "/dev", + Device: "tmpfs", + Flags: unix.MS_NOSUID | unix.MS_STRICTATIME, + Data: "mode=755", + }, + { + Source: "devpts", + Destination: "/dev/pts", + Device: "devpts", + Flags: unix.MS_NOSUID | unix.MS_NOEXEC, + Data: "newinstance,ptmxmode=0666,mode=0620,gid=5", + }, + { + Device: "tmpfs", + Source: "shm", + Destination: "/dev/shm", + Data: "mode=1777,size=65536k", + Flags: defaultMountFlags, + }, + { + Source: "mqueue", + Destination: "/dev/mqueue", + Device: "mqueue", + Flags: defaultMountFlags, + }, + { + Source: "sysfs", + Destination: "/sys", + Device: "sysfs", + Flags: defaultMountFlags | unix.MS_RDONLY, + }, + }, + UidMappings: []configs.IDMap{ + { + ContainerID: 0, + HostID: 1000, + Size: 65536, + }, + }, + GidMappings: []configs.IDMap{ + { + ContainerID: 0, + HostID: 1000, + Size: 65536, + }, + }, + Networks: []*configs.Network{ + { + Type: "loopback", + Address: "127.0.0.1/0", + Gateway: "localhost", + }, + }, + Rlimits: []configs.Rlimit{ + { + Type: unix.RLIMIT_NOFILE, + Hard: uint64(1025), + Soft: uint64(1025), + }, + }, +} +``` + +Once you have the configuration 
populated you can create a container:
+
+```go
+container, err := factory.Create("container-id", config)
+if err != nil {
+	logrus.Fatal(err)
+	return
+}
+```
+
+To spawn bash as the initial process inside the container and have the
+process's pid returned in order to wait, signal, or kill the process:
+
+```go
+process := &libcontainer.Process{
+	Args:   []string{"/bin/bash"},
+	Env:    []string{"PATH=/bin"},
+	User:   "daemon",
+	Stdin:  os.Stdin,
+	Stdout: os.Stdout,
+	Stderr: os.Stderr,
+}
+
+err := container.Run(process)
+if err != nil {
+	container.Destroy()
+	logrus.Fatal(err)
+	return
+}
+
+// wait for the process to finish.
+_, err := process.Wait()
+if err != nil {
+	logrus.Fatal(err)
+}
+
+// destroy the container.
+container.Destroy()
+```
+
+Additional ways to interact with a running container are:
+
+```go
+// return all the pids for all processes running inside the container.
+processes, err := container.Processes()
+
+// get detailed cpu, memory, io, and network statistics for the container and
+// its processes.
+stats, err := container.Stats()
+
+// pause all processes inside the container.
+container.Pause()
+
+// resume all paused processes.
+container.Resume()
+
+// send signal to container's init process.
+container.Signal(signal)
+
+// update container resource constraints.
+container.Set(config)
+
+// get current status of the container.
+status, err := container.Status()
+
+// get current container's state information.
+state, err := container.State()
+```
+
+
+#### Checkpoint & Restore
+
+libcontainer now integrates [CRIU](http://criu.org/) for checkpointing and restoring containers.
+This lets you save the state of a process running inside a container to disk, and then restore
+that state into a new process, on the same machine or on another machine.
+
+`criu` version 1.5.2 or higher is required to use checkpoint and restore.
+If you don't already have `criu` installed, you can build it from source, following the
+[online instructions](http://criu.org/Installation). `criu` is also installed in the Docker image
+generated when building libcontainer with Docker.
+
+
+## Copyright and license
+
+Code and documentation copyright 2014 Docker, Inc.
+The code and documentation are released under the [Apache 2.0 license](../LICENSE).
+The documentation is also released under Creative Commons Attribution 4.0 International License.
+You may obtain a copy of the license, titled CC-BY-4.0, at http://creativecommons.org/licenses/by/4.0/.
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/nsenter/README.md b/vendor/github.com/opencontainers/runc/libcontainer/nsenter/README.md
new file mode 100644
index 0000000..9ec6c39
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/nsenter/README.md
@@ -0,0 +1,44 @@
+## nsenter
+
+The `nsenter` package registers a special init constructor that is called before
+the Go runtime has a chance to boot. This gives us the ability to `setns` into
+existing namespaces and avoid the issues that the Go runtime has with multiple
+threads. This constructor is called whenever the package is imported (and thus
+registered) in your Go application.
+
+The `nsenter` package imports `"C"` and therefore uses the [cgo](https://golang.org/cmd/cgo/)
+package. In cgo, if the import of "C" is immediately preceded by a comment, that comment,
+called the preamble, is used as a header when compiling the C parts of the package.
+So whenever package `nsenter` is imported, the C function `nsexec()` is
+called.
+The `nsenter` package is only imported in `init.go`, so every time the runc
+`init` command is invoked, that C code is run.
+
+Because `nsexec()` must run before the Go runtime in order to use the
+Linux kernel namespace facilities, you must `import` this library into a
+package if you plan to use `libcontainer` directly. Otherwise Go will not
+execute the `nsexec()` constructor, which means that the re-exec will not
+cause the namespaces to be joined. You can import it like this:
+
+```go
+import _ "github.com/opencontainers/runc/libcontainer/nsenter"
+```
+
+`nsexec()` will first get the file descriptor number for the init pipe
+from the environment variable `_LIBCONTAINER_INITPIPE` (which was opened
+by the parent and kept open across the fork-exec of the `nsexec()` init
+process). The init pipe is used to read bootstrap data (namespace paths,
+clone flags, uid and gid mappings, and the console path) from the parent
+process. `nsexec()` will then call `setns(2)` to join the namespaces
+provided in the bootstrap data (if available), `clone(2)` a child process
+with the provided clone flags, update the user and group ID mappings, do
+some further miscellaneous setup steps, and then send the PID of the
+child process to the parent of the `nsexec()` "caller". Finally,
+the parent `nsexec()` will exit and the child `nsexec()` process will
+return to allow the Go runtime to take over.
+
+NOTE: We do both `setns(2)` and `clone(2)` even if we don't have any
+`CLONE_NEW*` clone flags because we must fork a new process in order to
+enter the PID namespace.
+
+
+
+
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/nsenter/namespace.h b/vendor/github.com/opencontainers/runc/libcontainer/nsenter/namespace.h
new file mode 100644
index 0000000..9e9bdca
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/nsenter/namespace.h
@@ -0,0 +1,32 @@
+#ifndef NSENTER_NAMESPACE_H
+#define NSENTER_NAMESPACE_H
+
+#ifndef _GNU_SOURCE
+# define _GNU_SOURCE
+#endif
+#include <sched.h>
+
+/* All of these are taken from include/uapi/linux/sched.h */
+#ifndef CLONE_NEWNS
+# define CLONE_NEWNS 0x00020000 /* New mount namespace group */
+#endif
+#ifndef CLONE_NEWCGROUP
+# define CLONE_NEWCGROUP 0x02000000 /* New cgroup namespace */
+#endif
+#ifndef CLONE_NEWUTS
+# define CLONE_NEWUTS 0x04000000 /* New utsname namespace */
+#endif
+#ifndef CLONE_NEWIPC
+# define CLONE_NEWIPC 0x08000000 /* New ipc namespace */
+#endif
+#ifndef CLONE_NEWUSER
+# define CLONE_NEWUSER 0x10000000 /* New user namespace */
+#endif
+#ifndef CLONE_NEWPID
+# define CLONE_NEWPID 0x20000000 /* New pid namespace */
+#endif
+#ifndef CLONE_NEWNET
+# define CLONE_NEWNET 0x40000000 /* New network namespace */
+#endif
+
+#endif /* NSENTER_NAMESPACE_H */
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsenter.go b/vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsenter.go
new file mode 100644
index 0000000..07f4d63
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsenter.go
@@ -0,0 +1,12 @@
+// +build linux,!gccgo
+
+package nsenter
+
+/*
+#cgo CFLAGS: -Wall
+extern void nsexec();
+void __attribute__((constructor)) init(void) {
+	nsexec();
+}
+*/
+import "C"
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsenter_gccgo.go b/vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsenter_gccgo.go
new file mode 100644
index 0000000..63c7a3e
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsenter_gccgo.go
@@ -0,0 +1,25 @@
+// +build
linux,gccgo + +package nsenter + +/* +#cgo CFLAGS: -Wall +extern void nsexec(); +void __attribute__((constructor)) init(void) { + nsexec(); +} +*/ +import "C" + +// AlwaysFalse is here to stay false +// (and be exported so the compiler doesn't optimize out its reference) +var AlwaysFalse bool + +func init() { + if AlwaysFalse { + // by referencing this C init() in a noop test, it will ensure the compiler + // links in the C function. + // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=65134 + C.init() + } +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsenter_unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsenter_unsupported.go new file mode 100644 index 0000000..ac701ca --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsenter_unsupported.go @@ -0,0 +1,5 @@ +// +build !linux !cgo + +package nsenter + +import "C" diff --git a/vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsexec.c b/vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsexec.c new file mode 100644 index 0000000..28269df --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsexec.c @@ -0,0 +1,995 @@ + +#define _GNU_SOURCE +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include + +/* Get all of the CLONE_NEW* flags. */ +#include "namespace.h" + +/* Synchronisation values. */ +enum sync_t { + SYNC_USERMAP_PLS = 0x40, /* Request parent to map our users. */ + SYNC_USERMAP_ACK = 0x41, /* Mapping finished by the parent. */ + SYNC_RECVPID_PLS = 0x42, /* Tell parent we're sending the PID. */ + SYNC_RECVPID_ACK = 0x43, /* PID was correctly received by parent. */ + SYNC_GRANDCHILD = 0x44, /* The grandchild is ready to run. */ + SYNC_CHILD_READY = 0x45, /* The child or grandchild is ready to return. */ + + /* XXX: This doesn't help with segfaults and other such issues. */ + SYNC_ERR = 0xFF, /* Fatal error, no turning back. The error code follows. */ +}; + +/* + * Synchronisation value for cgroup namespace setup. + * The same constant is defined in process_linux.go as "createCgroupns". + */ +#define CREATECGROUPNS 0x80 + +/* longjmp() arguments. */ +#define JUMP_PARENT 0x00 +#define JUMP_CHILD 0xA0 +#define JUMP_INIT 0xA1 + +/* JSON buffer. */ +#define JSON_MAX 4096 + +/* Assume the stack grows down, so arguments should be above it. */ +struct clone_t { + /* + * Reserve some space for clone() to locate arguments + * and retcode in this place + */ + char stack[4096] __attribute__ ((aligned(16))); + char stack_ptr[0]; + + /* There's two children. This is used to execute the different code. */ + jmp_buf *env; + int jmpval; +}; + +struct nlconfig_t { + char *data; + + /* Process settings. */ + uint32_t cloneflags; + char *oom_score_adj; + size_t oom_score_adj_len; + + /* User namespace settings. */ + char *uidmap; + size_t uidmap_len; + char *gidmap; + size_t gidmap_len; + char *namespaces; + size_t namespaces_len; + uint8_t is_setgroup; + + /* Rootless container settings. */ + uint8_t is_rootless_euid; /* boolean */ + char *uidmappath; + size_t uidmappath_len; + char *gidmappath; + size_t gidmappath_len; +}; + +/* + * List of netlink message types sent to us as part of bootstrapping the init. + * These constants are defined in libcontainer/message_linux.go. 
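+ *
+ * Editorial note: as nl_parse() below illustrates, the bootstrap payload is
+ * a single INIT_MSG netlink message whose body is a sequence of netlink
+ * attributes; nla_type selects one of the constants below, e.g.
+ * CLONE_FLAGS_ATTR carries a uint32 of clone(2) flags, while UIDMAP_ATTR
+ * carries the raw uid_map text that is later written to /proc/<pid>/uid_map.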
+ */ +#define INIT_MSG 62000 +#define CLONE_FLAGS_ATTR 27281 +#define NS_PATHS_ATTR 27282 +#define UIDMAP_ATTR 27283 +#define GIDMAP_ATTR 27284 +#define SETGROUP_ATTR 27285 +#define OOM_SCORE_ADJ_ATTR 27286 +#define ROOTLESS_EUID_ATTR 27287 +#define UIDMAPPATH_ATTR 27288 +#define GIDMAPPATH_ATTR 27289 + +/* + * Use the raw syscall for versions of glibc which don't include a function for + * it, namely (glibc 2.12). + */ +#if __GLIBC__ == 2 && __GLIBC_MINOR__ < 14 +# define _GNU_SOURCE +# include "syscall.h" +# if !defined(SYS_setns) && defined(__NR_setns) +# define SYS_setns __NR_setns +# endif + +#ifndef SYS_setns +# error "setns(2) syscall not supported by glibc version" +#endif + +int setns(int fd, int nstype) +{ + return syscall(SYS_setns, fd, nstype); +} +#endif + +/* XXX: This is ugly. */ +static int syncfd = -1; + +/* TODO(cyphar): Fix this so it correctly deals with syncT. */ +#define bail(fmt, ...) \ + do { \ + int ret = __COUNTER__ + 1; \ + fprintf(stderr, "nsenter: " fmt ": %m\n", ##__VA_ARGS__); \ + if (syncfd >= 0) { \ + enum sync_t s = SYNC_ERR; \ + if (write(syncfd, &s, sizeof(s)) != sizeof(s)) \ + fprintf(stderr, "nsenter: failed: write(s)"); \ + if (write(syncfd, &ret, sizeof(ret)) != sizeof(ret)) \ + fprintf(stderr, "nsenter: failed: write(ret)"); \ + } \ + exit(ret); \ + } while(0) + +static int write_file(char *data, size_t data_len, char *pathfmt, ...) +{ + int fd, len, ret = 0; + char path[PATH_MAX]; + + va_list ap; + va_start(ap, pathfmt); + len = vsnprintf(path, PATH_MAX, pathfmt, ap); + va_end(ap); + if (len < 0) + return -1; + + fd = open(path, O_RDWR); + if (fd < 0) { + return -1; + } + + len = write(fd, data, data_len); + if (len != data_len) { + ret = -1; + goto out; + } + + out: + close(fd); + return ret; +} + +enum policy_t { + SETGROUPS_DEFAULT = 0, + SETGROUPS_ALLOW, + SETGROUPS_DENY, +}; + +/* This *must* be called before we touch gid_map. */ +static void update_setgroups(int pid, enum policy_t setgroup) +{ + char *policy; + + switch (setgroup) { + case SETGROUPS_ALLOW: + policy = "allow"; + break; + case SETGROUPS_DENY: + policy = "deny"; + break; + case SETGROUPS_DEFAULT: + default: + /* Nothing to do. */ + return; + } + + if (write_file(policy, strlen(policy), "/proc/%d/setgroups", pid) < 0) { + /* + * If the kernel is too old to support /proc/pid/setgroups, + * open(2) or write(2) will return ENOENT. This is fine. + */ + if (errno != ENOENT) + bail("failed to write '%s' to /proc/%d/setgroups", policy, pid); + } +} + +static int try_mapping_tool(const char *app, int pid, char *map, size_t map_len) +{ + int child; + + /* + * If @app is NULL, execve will segfault. Just check it here and bail (if + * we're in this path, the caller is already getting desperate and there + * isn't a backup to this failing). This usually would be a configuration + * or programming issue. + */ + if (!app) + bail("mapping tool not present"); + + child = fork(); + if (child < 0) + bail("failed to fork"); + + if (!child) { +#define MAX_ARGV 20 + char *argv[MAX_ARGV]; + char *envp[] = { NULL }; + char pid_fmt[16]; + int argc = 0; + char *next; + + snprintf(pid_fmt, 16, "%d", pid); + + argv[argc++] = (char *)app; + argv[argc++] = pid_fmt; + /* + * Convert the map string into a list of argument that + * newuidmap/newgidmap can understand. 
+ */ + + while (argc < MAX_ARGV) { + if (*map == '\0') { + argv[argc++] = NULL; + break; + } + argv[argc++] = map; + next = strpbrk(map, "\n "); + if (next == NULL) + break; + *next++ = '\0'; + map = next + strspn(next, "\n "); + } + + execve(app, argv, envp); + bail("failed to execv"); + } else { + int status; + + while (true) { + if (waitpid(child, &status, 0) < 0) { + if (errno == EINTR) + continue; + bail("failed to waitpid"); + } + if (WIFEXITED(status) || WIFSIGNALED(status)) + return WEXITSTATUS(status); + } + } + + return -1; +} + +static void update_uidmap(const char *path, int pid, char *map, size_t map_len) +{ + if (map == NULL || map_len <= 0) + return; + + if (write_file(map, map_len, "/proc/%d/uid_map", pid) < 0) { + if (errno != EPERM) + bail("failed to update /proc/%d/uid_map", pid); + if (try_mapping_tool(path, pid, map, map_len)) + bail("failed to use newuid map on %d", pid); + } +} + +static void update_gidmap(const char *path, int pid, char *map, size_t map_len) +{ + if (map == NULL || map_len <= 0) + return; + + if (write_file(map, map_len, "/proc/%d/gid_map", pid) < 0) { + if (errno != EPERM) + bail("failed to update /proc/%d/gid_map", pid); + if (try_mapping_tool(path, pid, map, map_len)) + bail("failed to use newgid map on %d", pid); + } +} + +static void update_oom_score_adj(char *data, size_t len) +{ + if (data == NULL || len <= 0) + return; + + if (write_file(data, len, "/proc/self/oom_score_adj") < 0) + bail("failed to update /proc/self/oom_score_adj"); +} + +/* A dummy function that just jumps to the given jumpval. */ +static int child_func(void *arg) __attribute__ ((noinline)); +static int child_func(void *arg) +{ + struct clone_t *ca = (struct clone_t *)arg; + longjmp(*ca->env, ca->jmpval); +} + +static int clone_parent(jmp_buf *env, int jmpval) __attribute__ ((noinline)); +static int clone_parent(jmp_buf *env, int jmpval) +{ + struct clone_t ca = { + .env = env, + .jmpval = jmpval, + }; + + return clone(child_func, ca.stack_ptr, CLONE_PARENT | SIGCHLD, &ca); +} + +/* + * Gets the init pipe fd from the environment, which is used to read the + * bootstrap data and tell the parent what the new pid is after we finish + * setting up the environment. + */ +static int initpipe(void) +{ + int pipenum; + char *initpipe, *endptr; + + initpipe = getenv("_LIBCONTAINER_INITPIPE"); + if (initpipe == NULL || *initpipe == '\0') + return -1; + + pipenum = strtol(initpipe, &endptr, 10); + if (*endptr != '\0') + bail("unable to parse _LIBCONTAINER_INITPIPE"); + + return pipenum; +} + +/* Returns the clone(2) flag for a namespace, given the name of a namespace. */ +static int nsflag(char *name) +{ + if (!strcmp(name, "cgroup")) + return CLONE_NEWCGROUP; + else if (!strcmp(name, "ipc")) + return CLONE_NEWIPC; + else if (!strcmp(name, "mnt")) + return CLONE_NEWNS; + else if (!strcmp(name, "net")) + return CLONE_NEWNET; + else if (!strcmp(name, "pid")) + return CLONE_NEWPID; + else if (!strcmp(name, "user")) + return CLONE_NEWUSER; + else if (!strcmp(name, "uts")) + return CLONE_NEWUTS; + + /* If we don't recognise a name, fallback to 0. */ + return 0; +} + +static uint32_t readint32(char *buf) +{ + return *(uint32_t *) buf; +} + +static uint8_t readint8(char *buf) +{ + return *(uint8_t *) buf; +} + +static void nl_parse(int fd, struct nlconfig_t *config) +{ + size_t len, size; + struct nlmsghdr hdr; + char *data, *current; + + /* Retrieve the netlink header. 
*/ + len = read(fd, &hdr, NLMSG_HDRLEN); + if (len != NLMSG_HDRLEN) + bail("invalid netlink header length %zu", len); + + if (hdr.nlmsg_type == NLMSG_ERROR) + bail("failed to read netlink message"); + + if (hdr.nlmsg_type != INIT_MSG) + bail("unexpected msg type %d", hdr.nlmsg_type); + + /* Retrieve data. */ + size = NLMSG_PAYLOAD(&hdr, 0); + current = data = malloc(size); + if (!data) + bail("failed to allocate %zu bytes of memory for nl_payload", size); + + len = read(fd, data, size); + if (len != size) + bail("failed to read netlink payload, %zu != %zu", len, size); + + /* Parse the netlink payload. */ + config->data = data; + while (current < data + size) { + struct nlattr *nlattr = (struct nlattr *)current; + size_t payload_len = nlattr->nla_len - NLA_HDRLEN; + + /* Advance to payload. */ + current += NLA_HDRLEN; + + /* Handle payload. */ + switch (nlattr->nla_type) { + case CLONE_FLAGS_ATTR: + config->cloneflags = readint32(current); + break; + case ROOTLESS_EUID_ATTR: + config->is_rootless_euid = readint8(current); /* boolean */ + break; + case OOM_SCORE_ADJ_ATTR: + config->oom_score_adj = current; + config->oom_score_adj_len = payload_len; + break; + case NS_PATHS_ATTR: + config->namespaces = current; + config->namespaces_len = payload_len; + break; + case UIDMAP_ATTR: + config->uidmap = current; + config->uidmap_len = payload_len; + break; + case GIDMAP_ATTR: + config->gidmap = current; + config->gidmap_len = payload_len; + break; + case UIDMAPPATH_ATTR: + config->uidmappath = current; + config->uidmappath_len = payload_len; + break; + case GIDMAPPATH_ATTR: + config->gidmappath = current; + config->gidmappath_len = payload_len; + break; + case SETGROUP_ATTR: + config->is_setgroup = readint8(current); + break; + default: + bail("unknown netlink message type %d", nlattr->nla_type); + } + + current += NLA_ALIGN(payload_len); + } +} + +void nl_free(struct nlconfig_t *config) +{ + free(config->data); +} + +void join_namespaces(char *nslist) +{ + int num = 0, i; + char *saveptr = NULL; + char *namespace = strtok_r(nslist, ",", &saveptr); + struct namespace_t { + int fd; + int ns; + char type[PATH_MAX]; + char path[PATH_MAX]; + } *namespaces = NULL; + + if (!namespace || !strlen(namespace) || !strlen(nslist)) + bail("ns paths are empty"); + + /* + * We have to open the file descriptors first, since after + * we join the mnt namespace we might no longer be able to + * access the paths. + */ + do { + int fd; + char *path; + struct namespace_t *ns; + + /* Resize the namespace array. */ + namespaces = realloc(namespaces, ++num * sizeof(struct namespace_t)); + if (!namespaces) + bail("failed to reallocate namespace array"); + ns = &namespaces[num - 1]; + + /* Split 'ns:path'. */ + path = strstr(namespace, ":"); + if (!path) + bail("failed to parse %s", namespace); + *path++ = '\0'; + + fd = open(path, O_RDONLY); + if (fd < 0) + bail("failed to open %s", path); + + ns->fd = fd; + ns->ns = nsflag(namespace); + strncpy(ns->path, path, PATH_MAX - 1); + ns->path[PATH_MAX - 1] = '\0'; + } while ((namespace = strtok_r(NULL, ",", &saveptr)) != NULL); + + /* + * The ordering in which we join namespaces is important. We should + * always join the user namespace *first*. This is all guaranteed + * from the container_linux.go side of this, so we're just going to + * follow the order given to us. 
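+	 *
+	 * Editorial note: nslist, parsed above, is a comma-separated list of
+	 * "type:path" entries, e.g.
+	 * "user:/proc/1234/ns/user,ipc:/proc/1234/ns/ipc,net:/proc/1234/ns/net",
+	 * with the user namespace entry ordered first for the reason given here.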
+ */ + + for (i = 0; i < num; i++) { + struct namespace_t ns = namespaces[i]; + + if (setns(ns.fd, ns.ns) < 0) + bail("failed to setns to %s", ns.path); + + close(ns.fd); + } + + free(namespaces); +} + +void nsexec(void) +{ + int pipenum; + jmp_buf env; + int sync_child_pipe[2], sync_grandchild_pipe[2]; + struct nlconfig_t config = { 0 }; + + /* + * If we don't have an init pipe, just return to the go routine. + * We'll only get an init pipe for start or exec. + */ + pipenum = initpipe(); + if (pipenum == -1) + return; + + /* Parse all of the netlink configuration. */ + nl_parse(pipenum, &config); + + /* Set oom_score_adj. This has to be done before !dumpable because + * /proc/self/oom_score_adj is not writeable unless you're an privileged + * user (if !dumpable is set). All children inherit their parent's + * oom_score_adj value on fork(2) so this will always be propagated + * properly. + */ + update_oom_score_adj(config.oom_score_adj, config.oom_score_adj_len); + + /* + * Make the process non-dumpable, to avoid various race conditions that + * could cause processes in namespaces we're joining to access host + * resources (or potentially execute code). + * + * However, if the number of namespaces we are joining is 0, we are not + * going to be switching to a different security context. Thus setting + * ourselves to be non-dumpable only breaks things (like rootless + * containers), which is the recommendation from the kernel folks. + */ + if (config.namespaces) { + if (prctl(PR_SET_DUMPABLE, 0, 0, 0, 0) < 0) + bail("failed to set process as non-dumpable"); + } + + /* Pipe so we can tell the child when we've finished setting up. */ + if (socketpair(AF_LOCAL, SOCK_STREAM, 0, sync_child_pipe) < 0) + bail("failed to setup sync pipe between parent and child"); + + /* + * We need a new socketpair to sync with grandchild so we don't have + * race condition with child. + */ + if (socketpair(AF_LOCAL, SOCK_STREAM, 0, sync_grandchild_pipe) < 0) + bail("failed to setup sync pipe between parent and grandchild"); + + /* TODO: Currently we aren't dealing with child deaths properly. */ + + /* + * Okay, so this is quite annoying. + * + * In order for this unsharing code to be more extensible we need to split + * up unshare(CLONE_NEWUSER) and clone() in various ways. The ideal case + * would be if we did clone(CLONE_NEWUSER) and the other namespaces + * separately, but because of SELinux issues we cannot really do that. But + * we cannot just dump the namespace flags into clone(...) because several + * usecases (such as rootless containers) require more granularity around + * the namespace setup. In addition, some older kernels had issues where + * CLONE_NEWUSER wasn't handled before other namespaces (but we cannot + * handle this while also dealing with SELinux so we choose SELinux support + * over broken kernel support). + * + * However, if we unshare(2) the user namespace *before* we clone(2), then + * all hell breaks loose. + * + * The parent no longer has permissions to do many things (unshare(2) drops + * all capabilities in your old namespace), and the container cannot be set + * up to have more than one {uid,gid} mapping. This is obviously less than + * ideal. In order to fix this, we have to first clone(2) and then unshare. + * + * Unfortunately, it's not as simple as that. We have to fork to enter the + * PID namespace (the PID namespace only applies to children). 
Since we'll + * have to double-fork, this clone_parent() call won't be able to get the + * PID of the _actual_ init process (without doing more synchronisation than + * I can deal with at the moment). So we'll just get the parent to send it + * for us; the only job of this process is to update + * /proc/pid/{setgroups,uid_map,gid_map}. + * + * And as a result of the above, we also need to setns(2) in the first child + * because if we join a PID namespace in the topmost parent then our child + * will be in that namespace (and it will not be able to give us a PID value + * that makes sense without resorting to sending things with cmsg). + * + * This also deals with an older issue caused by dumping cloneflags into + * clone(2): On old kernels, CLONE_PARENT didn't work with CLONE_NEWPID, so + * we have to unshare(2) before clone(2) in order to do this. This was fixed + * in upstream commit 1f7f4dde5c945f41a7abc2285be43d918029ecc5, and was + * introduced by 40a0d32d1eaffe6aac7324ca92604b6b3977eb0e. As far as we're + * aware, the last mainline kernel which had this bug was Linux 3.12. + * However, we cannot comment on which kernels the broken patch was + * backported to. + * + * -- Aleksa "what has my life come to?" Sarai + */ + + switch (setjmp(env)) { + /* + * Stage 0: We're in the parent. Our job is just to create a new child + * (stage 1: JUMP_CHILD) process and write its uid_map and + * gid_map. That process will go on to create a new process, then + * it will send us its PID which we will send to the bootstrap + * process. + */ + case JUMP_PARENT:{ + int len; + pid_t child, first_child = -1; + bool ready = false; + + /* For debugging. */ + prctl(PR_SET_NAME, (unsigned long)"runc:[0:PARENT]", 0, 0, 0); + + /* Start the process of getting a container. */ + child = clone_parent(&env, JUMP_CHILD); + if (child < 0) + bail("unable to fork: child_func"); + + /* + * State machine for synchronisation with the children. + * + * The parent only returns once both the child and the grandchild are + * ready, so we can receive all possible error codes + * generated by the children. + */ + while (!ready) { + enum sync_t s; + int ret; + + syncfd = sync_child_pipe[1]; + close(sync_child_pipe[0]); + + if (read(syncfd, &s, sizeof(s)) != sizeof(s)) + bail("failed to sync with child: next state"); + + switch (s) { + case SYNC_ERR: + /* We have to mirror the error code of the child. */ + if (read(syncfd, &ret, sizeof(ret)) != sizeof(ret)) + bail("failed to sync with child: read(error code)"); + + exit(ret); + case SYNC_USERMAP_PLS: + /* + * Enable setgroups(2) if we've been asked to. But we also + * have to explicitly disable setgroups(2) if we're + * creating a rootless container with a single-entry mapping + * (i.e. config.is_setgroup == false); this is required + * since Linux 3.19. + * + * For rootless multi-entry mapping, config.is_setgroup shall be true and + * newuidmap/newgidmap shall be used. + */ + + if (config.is_rootless_euid && !config.is_setgroup) + update_setgroups(child, SETGROUPS_DENY); + + /* Set up mappings. */ + update_uidmap(config.uidmappath, child, config.uidmap, config.uidmap_len); + update_gidmap(config.gidmappath, child, config.gidmap, config.gidmap_len); + + s = SYNC_USERMAP_ACK; + if (write(syncfd, &s, sizeof(s)) != sizeof(s)) { + kill(child, SIGKILL); + bail("failed to sync with child: write(SYNC_USERMAP_ACK)"); + } + break; + case SYNC_RECVPID_PLS:{ + first_child = child; + + /* Get the init_func pid.
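+ * (This is the PID of the stage 2 process, which stage 1 sends us + * immediately after SYNC_RECVPID_PLS.)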
*/ + if (read(syncfd, &child, sizeof(child)) != sizeof(child)) { + kill(first_child, SIGKILL); + bail("failed to sync with child: read(childpid)"); + } + + /* Send ACK. */ + s = SYNC_RECVPID_ACK; + if (write(syncfd, &s, sizeof(s)) != sizeof(s)) { + kill(first_child, SIGKILL); + kill(child, SIGKILL); + bail("failed to sync with child: write(SYNC_RECVPID_ACK)"); + } + + /* + * Send the init_func pid and the pid of the first child back to our parent. + * We need to send both back because we can't reap the first child we created (CLONE_PARENT); + * it becomes the responsibility of our parent to reap the first child. + */ + len = dprintf(pipenum, "{\"pid\": %d, \"pid_first\": %d}\n", child, first_child); + if (len < 0) { + kill(child, SIGKILL); + bail("unable to generate JSON for child pid"); + } + } + break; + case SYNC_CHILD_READY: + ready = true; + break; + default: + bail("unexpected sync value: %u", s); + } + } + + /* Now sync with the grandchild. */ + + ready = false; + while (!ready) { + enum sync_t s; + int ret; + + syncfd = sync_grandchild_pipe[1]; + close(sync_grandchild_pipe[0]); + + s = SYNC_GRANDCHILD; + if (write(syncfd, &s, sizeof(s)) != sizeof(s)) { + kill(child, SIGKILL); + bail("failed to sync with child: write(SYNC_GRANDCHILD)"); + } + + if (read(syncfd, &s, sizeof(s)) != sizeof(s)) + bail("failed to sync with child: next state"); + + switch (s) { + case SYNC_ERR: + /* We have to mirror the error code of the child. */ + if (read(syncfd, &ret, sizeof(ret)) != sizeof(ret)) + bail("failed to sync with child: read(error code)"); + + exit(ret); + case SYNC_CHILD_READY: + ready = true; + break; + default: + bail("unexpected sync value: %u", s); + } + } + exit(0); + } + + /* + * Stage 1: We're in the first child process. Our job is to join any + * provided namespaces in the netlink payload and unshare all + * of the requested namespaces. If we've been asked to + * CLONE_NEWUSER, we will ask our parent (stage 0) to set up + * our user mappings for us. Then, we create a new child + * (stage 2: JUMP_INIT) for the PID namespace. We then send the + * child's PID to our parent (stage 0). + */ + case JUMP_CHILD:{ + pid_t child; + enum sync_t s; + + /* We're in a child and thus need to tell the parent if we die. */ + syncfd = sync_child_pipe[0]; + close(sync_child_pipe[1]); + + /* For debugging. */ + prctl(PR_SET_NAME, (unsigned long)"runc:[1:CHILD]", 0, 0, 0); + + /* + * We need to setns first. We cannot do this earlier (in stage 0) + * because of the fact that we forked to get here (the PID of + * [stage 2: JUMP_INIT] would be meaningless). We could send it + * using cmsg(3) but that's just annoying. + */ + if (config.namespaces) + join_namespaces(config.namespaces); + + /* + * Deal with user namespaces first. They are quite special, as they + * affect our ability to unshare other namespaces and are used as + * context for privilege checks. + * + * We don't unshare all namespaces in one go. The reason for this + * is that, while the kernel documentation may claim otherwise, + * there are certain cases where unsharing all namespaces at once + * will result in namespace objects being owned incorrectly. + * Ideally we should just fix these kernel bugs, but it's better to + * be safe than sorry, and fix them separately. + * + * A specific case of this is that the SELinux label of the + * internal kern-mount that mqueue uses will be incorrect if the + * UTS namespace is cloned before the USER namespace is mapped.
+ * I've also heard of similar problems with the network namespace + * in some scenarios. This also mirrors how LXC deals with this + * problem. + */ + if (config.cloneflags & CLONE_NEWUSER) { + if (unshare(CLONE_NEWUSER) < 0) + bail("failed to unshare user namespace"); + config.cloneflags &= ~CLONE_NEWUSER; + + /* + * We don't have the privileges to do any mapping here (see the + * clone_parent rant). So signal our parent to hook us up. + */ + + /* Switching is only necessary if we joined namespaces. */ + if (config.namespaces) { + if (prctl(PR_SET_DUMPABLE, 1, 0, 0, 0) < 0) + bail("failed to set process as dumpable"); + } + s = SYNC_USERMAP_PLS; + if (write(syncfd, &s, sizeof(s)) != sizeof(s)) + bail("failed to sync with parent: write(SYNC_USERMAP_PLS)"); + + /* ... wait for mapping ... */ + + if (read(syncfd, &s, sizeof(s)) != sizeof(s)) + bail("failed to sync with parent: read(SYNC_USERMAP_ACK)"); + if (s != SYNC_USERMAP_ACK) + bail("failed to sync with parent: SYNC_USERMAP_ACK: got %u", s); + /* Switching is only necessary if we joined namespaces. */ + if (config.namespaces) { + if (prctl(PR_SET_DUMPABLE, 0, 0, 0, 0) < 0) + bail("failed to set process as dumpable"); + } + + /* Become root in the namespace proper. */ + if (setresuid(0, 0, 0) < 0) + bail("failed to become root in user namespace"); + } + /* + * Unshare all of the namespaces. Now, it should be noted that this + * ordering might break in the future (especially with rootless + * containers). But for now, it's not possible to split this into + * CLONE_NEWUSER + [the rest] because of some RHEL SELinux issues. + * + * Note that we don't merge this with clone() because there were + * some old kernel versions where clone(CLONE_PARENT | CLONE_NEWPID) + * was broken, so we'll just do it the long way anyway. + */ + if (unshare(config.cloneflags & ~CLONE_NEWCGROUP) < 0) + bail("failed to unshare namespaces"); + + /* + * TODO: What about non-namespace clone flags that we're dropping here? + * + * We fork again because of PID namespace, setns(2) or unshare(2) don't + * change the PID namespace of the calling process, because doing so + * would change the caller's idea of its own PID (as reported by getpid()), + * which would break many applications and libraries, so we must fork + * to actually enter the new PID namespace. + */ + child = clone_parent(&env, JUMP_INIT); + if (child < 0) + bail("unable to fork: init_func"); + + /* Send the child to our parent, which knows what it's doing. */ + s = SYNC_RECVPID_PLS; + if (write(syncfd, &s, sizeof(s)) != sizeof(s)) { + kill(child, SIGKILL); + bail("failed to sync with parent: write(SYNC_RECVPID_PLS)"); + } + if (write(syncfd, &child, sizeof(child)) != sizeof(child)) { + kill(child, SIGKILL); + bail("failed to sync with parent: write(childpid)"); + } + + /* ... wait for parent to get the pid ... */ + + if (read(syncfd, &s, sizeof(s)) != sizeof(s)) { + kill(child, SIGKILL); + bail("failed to sync with parent: read(SYNC_RECVPID_ACK)"); + } + if (s != SYNC_RECVPID_ACK) { + kill(child, SIGKILL); + bail("failed to sync with parent: SYNC_RECVPID_ACK: got %u", s); + } + + s = SYNC_CHILD_READY; + if (write(syncfd, &s, sizeof(s)) != sizeof(s)) { + kill(child, SIGKILL); + bail("failed to sync with parent: write(SYNC_CHILD_READY)"); + } + + /* Our work is done. [Stage 2: JUMP_INIT] is doing the rest of the work. */ + exit(0); + } + + /* + * Stage 2: We're the final child process, and the only process that will + * actually return to the Go runtime. 
Our job is to just do the + * final cleanup steps and then return to the Go runtime to allow + * init_linux.go to run. + */ + case JUMP_INIT:{ + /* + * We're inside the child now, having jumped from the + * start_child() code after forking in the parent. + */ + enum sync_t s; + + /* We're in a child and thus need to tell the parent if we die. */ + syncfd = sync_grandchild_pipe[0]; + close(sync_grandchild_pipe[1]); + close(sync_child_pipe[0]); + close(sync_child_pipe[1]); + + /* For debugging. */ + prctl(PR_SET_NAME, (unsigned long)"runc:[2:INIT]", 0, 0, 0); + + if (read(syncfd, &s, sizeof(s)) != sizeof(s)) + bail("failed to sync with parent: read(SYNC_GRANDCHILD)"); + if (s != SYNC_GRANDCHILD) + bail("failed to sync with parent: SYNC_GRANDCHILD: got %u", s); + + if (setsid() < 0) + bail("setsid failed"); + + if (setuid(0) < 0) + bail("setuid failed"); + + if (setgid(0) < 0) + bail("setgid failed"); + + if (!config.is_rootless_euid && config.is_setgroup) { + if (setgroups(0, NULL) < 0) + bail("setgroups failed"); + } + + /* ... wait until our topmost parent has finished cgroup setup in p.manager.Apply() ... */ + if (config.cloneflags & CLONE_NEWCGROUP) { + uint8_t value; + if (read(pipenum, &value, sizeof(value)) != sizeof(value)) + bail("read synchronisation value failed"); + if (value == CREATECGROUPNS) { + if (unshare(CLONE_NEWCGROUP) < 0) + bail("failed to unshare cgroup namespace"); + } else + bail("received unknown synchronisation value"); + } + + s = SYNC_CHILD_READY; + if (write(syncfd, &s, sizeof(s)) != sizeof(s)) + bail("failed to sync with patent: write(SYNC_CHILD_READY)"); + + /* Close sync pipes. */ + close(sync_grandchild_pipe[0]); + + /* Free netlink data. */ + nl_free(&config); + + /* Finish executing, let the Go runtime take over. */ + return; + } + default: + bail("unexpected jump value"); + } + + /* Should never be reached. */ + bail("should never be reached"); +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go b/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go new file mode 100644 index 0000000..a4ae890 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go @@ -0,0 +1,155 @@ +// +build linux + +package system + +import ( + "os" + "os/exec" + "syscall" // only for exec + "unsafe" + + "github.com/opencontainers/runc/libcontainer/user" + "golang.org/x/sys/unix" +) + +// If arg2 is nonzero, set the "child subreaper" attribute of the +// calling process; if arg2 is zero, unset the attribute. When a +// process is marked as a child subreaper, all of the children +// that it creates, and their descendants, will be marked as +// having a subreaper. In effect, a subreaper fulfills the role +// of init(1) for its descendant processes. Upon termination of +// a process that is orphaned (i.e., its immediate parent has +// already terminated) and marked as having a subreaper, the +// nearest still living ancestor subreaper will receive a SIGCHLD +// signal and be able to wait(2) on the process to discover its +// termination status. 
+const PR_SET_CHILD_SUBREAPER = 36 + +type ParentDeathSignal int + +func (p ParentDeathSignal) Restore() error { + if p == 0 { + return nil + } + current, err := GetParentDeathSignal() + if err != nil { + return err + } + if p == current { + return nil + } + return p.Set() +} + +func (p ParentDeathSignal) Set() error { + return SetParentDeathSignal(uintptr(p)) +} + +func Execv(cmd string, args []string, env []string) error { + name, err := exec.LookPath(cmd) + if err != nil { + return err + } + + return syscall.Exec(name, args, env) +} + +func Prlimit(pid, resource int, limit unix.Rlimit) error { + _, _, err := unix.RawSyscall6(unix.SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(&limit)), uintptr(unsafe.Pointer(&limit)), 0, 0) + if err != 0 { + return err + } + return nil +} + +func SetParentDeathSignal(sig uintptr) error { + if err := unix.Prctl(unix.PR_SET_PDEATHSIG, sig, 0, 0, 0); err != nil { + return err + } + return nil +} + +func GetParentDeathSignal() (ParentDeathSignal, error) { + var sig int + if err := unix.Prctl(unix.PR_GET_PDEATHSIG, uintptr(unsafe.Pointer(&sig)), 0, 0, 0); err != nil { + return -1, err + } + return ParentDeathSignal(sig), nil +} + +func SetKeepCaps() error { + if err := unix.Prctl(unix.PR_SET_KEEPCAPS, 1, 0, 0, 0); err != nil { + return err + } + + return nil +} + +func ClearKeepCaps() error { + if err := unix.Prctl(unix.PR_SET_KEEPCAPS, 0, 0, 0, 0); err != nil { + return err + } + + return nil +} + +func Setctty() error { + if err := unix.IoctlSetInt(0, unix.TIOCSCTTY, 0); err != nil { + return err + } + return nil +} + +// RunningInUserNS detects whether we are currently running in a user namespace. +// Originally copied from github.com/lxc/lxd/shared/util.go +func RunningInUserNS() bool { + uidmap, err := user.CurrentProcessUIDMap() + if err != nil { + // This kernel-provided file only exists if user namespaces are supported + return false + } + return UIDMapInUserNS(uidmap) +} + +func UIDMapInUserNS(uidmap []user.IDMap) bool { + /* + * We assume we are in the initial user namespace if we have a full + * range - 4294967295 uids starting at uid 0. 
+ */ + if len(uidmap) == 1 && uidmap[0].ID == 0 && uidmap[0].ParentID == 0 && uidmap[0].Count == 4294967295 { + return false + } + return true +} + +// GetParentNSeuid returns the euid within the parent user namespace +func GetParentNSeuid() int64 { + euid := int64(os.Geteuid()) + uidmap, err := user.CurrentProcessUIDMap() + if err != nil { + // This kernel-provided file only exists if user namespaces are supported + return euid + } + for _, um := range uidmap { + if um.ID <= euid && euid <= um.ID+um.Count-1 { + return um.ParentID + euid - um.ID + } + } + return euid +} + +// SetSubreaper sets the value i as the subreaper setting for the calling process +func SetSubreaper(i int) error { + return unix.Prctl(PR_SET_CHILD_SUBREAPER, uintptr(i), 0, 0, 0) +} + +// GetSubreaper returns the subreaper setting for the calling process +func GetSubreaper() (int, error) { + var i uintptr + + if err := unix.Prctl(unix.PR_GET_CHILD_SUBREAPER, uintptr(unsafe.Pointer(&i)), 0, 0, 0); err != nil { + return -1, err + } + + return int(i), nil +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/proc.go b/vendor/github.com/opencontainers/runc/libcontainer/system/proc.go new file mode 100644 index 0000000..79232a4 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/system/proc.go @@ -0,0 +1,113 @@ +package system + +import ( + "fmt" + "io/ioutil" + "path/filepath" + "strconv" + "strings" +) + +// State is the status of a process. +type State rune + +const ( // Only values for Linux 3.14 and later are listed here + Dead State = 'X' + DiskSleep State = 'D' + Running State = 'R' + Sleeping State = 'S' + Stopped State = 'T' + TracingStop State = 't' + Zombie State = 'Z' +) + +// String forms of the state from proc(5)'s documentation for +// /proc/[pid]/status' "State" field. +func (s State) String() string { + switch s { + case Dead: + return "dead" + case DiskSleep: + return "disk sleep" + case Running: + return "running" + case Sleeping: + return "sleeping" + case Stopped: + return "stopped" + case TracingStop: + return "tracing stop" + case Zombie: + return "zombie" + default: + return fmt.Sprintf("unknown (%c)", s) + } +} + +// Stat_t represents the information from /proc/[pid]/stat, as +// described in proc(5) with names based on the /proc/[pid]/status +// fields. +type Stat_t struct { + // PID is the process ID. + PID uint + + // Name is the command run by the process. + Name string + + // State is the state of the process. + State State + + // StartTime is the number of clock ticks after system boot (since + // Linux 2.6). + StartTime uint64 +} + +// Stat returns a Stat_t instance for the specified process. +func Stat(pid int) (stat Stat_t, err error) { + bytes, err := ioutil.ReadFile(filepath.Join("/proc", strconv.Itoa(pid), "stat")) + if err != nil { + return stat, err + } + return parseStat(string(bytes)) +} + +// GetProcessStartTime is deprecated. Use Stat(pid) and +// Stat_t.StartTime instead. +func GetProcessStartTime(pid int) (string, error) { + stat, err := Stat(pid) + if err != nil { + return "", err + } + return fmt.Sprintf("%d", stat.StartTime), nil +} + +func parseStat(data string) (stat Stat_t, err error) { + // From proc(5), field 2 could contain space and is inside `(` and `)`. 
+ // The following is an example: + // 89653 (gunicorn: maste) S 89630 89653 89653 0 -1 4194560 29689 28896 0 3 146 32 76 19 20 0 1 0 2971844 52965376 3920 18446744073709551615 1 1 0 0 0 0 0 16781312 137447943 0 0 0 17 1 0 0 0 0 0 0 0 0 0 0 0 0 0 + i := strings.LastIndex(data, ")") + if i <= 2 || i >= len(data)-1 { + return stat, fmt.Errorf("invalid stat data: %q", data) + } + + parts := strings.SplitN(data[:i], "(", 2) + if len(parts) != 2 { + return stat, fmt.Errorf("invalid stat data: %q", data) + } + + stat.Name = parts[1] + _, err = fmt.Sscanf(parts[0], "%d", &stat.PID) + if err != nil { + return stat, err + } + + // parts indexes should be offset by 3 from the field number given + // proc(5), because parts is zero-indexed and we've removed fields + // one (PID) and two (Name) in the paren-split. + parts = strings.Split(data[i+2:], " ") + var state int + fmt.Sscanf(parts[3-3], "%c", &state) + stat.State = State(state) + fmt.Sscanf(parts[22-3], "%d", &stat.StartTime) + return stat, nil +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_32.go b/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_32.go new file mode 100644 index 0000000..c5ca5d8 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_32.go @@ -0,0 +1,26 @@ +// +build linux +// +build 386 arm + +package system + +import ( + "golang.org/x/sys/unix" +) + +// Setuid sets the uid of the calling thread to the specified uid. +func Setuid(uid int) (err error) { + _, _, e1 := unix.RawSyscall(unix.SYS_SETUID32, uintptr(uid), 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +// Setgid sets the gid of the calling thread to the specified gid. +func Setgid(gid int) (err error) { + _, _, e1 := unix.RawSyscall(unix.SYS_SETGID32, uintptr(gid), 0, 0) + if e1 != 0 { + err = e1 + } + return +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_64.go b/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_64.go new file mode 100644 index 0000000..11c3faa --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_64.go @@ -0,0 +1,26 @@ +// +build linux +// +build arm64 amd64 mips mipsle mips64 mips64le ppc ppc64 ppc64le s390x + +package system + +import ( + "golang.org/x/sys/unix" +) + +// Setuid sets the uid of the calling thread to the specified uid. +func Setuid(uid int) (err error) { + _, _, e1 := unix.RawSyscall(unix.SYS_SETUID, uintptr(uid), 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +// Setgid sets the gid of the calling thread to the specified gid. 
+func Setgid(gid int) (err error) { + _, _, e1 := unix.RawSyscall(unix.SYS_SETGID, uintptr(gid), 0, 0) + if e1 != 0 { + err = e1 + } + return +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig.go b/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig.go new file mode 100644 index 0000000..b8434f1 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig.go @@ -0,0 +1,12 @@ +// +build cgo,linux + +package system + +/* +#include +*/ +import "C" + +func GetClockTicks() int { + return int(C.sysconf(C._SC_CLK_TCK)) +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig_notcgo.go b/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig_notcgo.go new file mode 100644 index 0000000..d93b5d5 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig_notcgo.go @@ -0,0 +1,15 @@ +// +build !cgo windows + +package system + +func GetClockTicks() int { + // TODO figure out a better alternative for platforms where we're missing cgo + // + // TODO Windows. This could be implemented using Win32 QueryPerformanceFrequency(). + // https://msdn.microsoft.com/en-us/library/windows/desktop/ms644905(v=vs.85).aspx + // + // An example of its usage can be found here. + // https://msdn.microsoft.com/en-us/library/windows/desktop/dn553408(v=vs.85).aspx + + return 100 +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/system/unsupported.go new file mode 100644 index 0000000..b94be74 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/system/unsupported.go @@ -0,0 +1,27 @@ +// +build !linux + +package system + +import ( + "os" + + "github.com/opencontainers/runc/libcontainer/user" +) + +// RunningInUserNS is a stub for non-Linux systems +// Always returns false +func RunningInUserNS() bool { + return false +} + +// UIDMapInUserNS is a stub for non-Linux systems +// Always returns false +func UIDMapInUserNS(uidmap []user.IDMap) bool { + return false +} + +// GetParentNSeuid returns the euid within the parent user namespace +// Always returns os.Geteuid on non-linux +func GetParentNSeuid() int { + return os.Geteuid() +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/xattrs_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/system/xattrs_linux.go new file mode 100644 index 0000000..a6823fc --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/system/xattrs_linux.go @@ -0,0 +1,35 @@ +package system + +import "golang.org/x/sys/unix" + +// Returns a []byte slice if the xattr is set and nil otherwise +// Requires path and its attribute as arguments +func Lgetxattr(path string, attr string) ([]byte, error) { + var sz int + // Start with a 128 length byte array + dest := make([]byte, 128) + sz, errno := unix.Lgetxattr(path, attr, dest) + + switch { + case errno == unix.ENODATA: + return nil, errno + case errno == unix.ENOTSUP: + return nil, errno + case errno == unix.ERANGE: + // 128 byte array might just not be good enough, + // A dummy buffer is used to get the real size + // of the xattrs on disk + sz, errno = unix.Lgetxattr(path, attr, []byte{}) + if errno != nil { + return nil, errno + } + dest = make([]byte, sz) + sz, errno = unix.Lgetxattr(path, attr, dest) + if errno != nil { + return nil, errno + } + case errno != nil: + return nil, errno + } + return dest[:sz], nil +} diff --git 
a/vendor/github.com/opencontainers/runc/libcontainer/user/lookup.go b/vendor/github.com/opencontainers/runc/libcontainer/user/lookup.go new file mode 100644 index 0000000..6fd8dd0 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/user/lookup.go @@ -0,0 +1,41 @@ +package user + +import ( + "errors" +) + +var ( + // The current operating system does not provide the required data for user lookups. + ErrUnsupported = errors.New("user lookup: operating system does not provide passwd-formatted data") + // No matching entries found in file. + ErrNoPasswdEntries = errors.New("no matching entries in passwd file") + ErrNoGroupEntries = errors.New("no matching entries in group file") +) + +// LookupUser looks up a user by their username in /etc/passwd. If the user +// cannot be found (or there is no /etc/passwd file on the filesystem), then +// LookupUser returns an error. +func LookupUser(username string) (User, error) { + return lookupUser(username) +} + +// LookupUid looks up a user by their user id in /etc/passwd. If the user cannot +// be found (or there is no /etc/passwd file on the filesystem), then LookupId +// returns an error. +func LookupUid(uid int) (User, error) { + return lookupUid(uid) +} + +// LookupGroup looks up a group by its name in /etc/group. If the group cannot +// be found (or there is no /etc/group file on the filesystem), then LookupGroup +// returns an error. +func LookupGroup(groupname string) (Group, error) { + return lookupGroup(groupname) +} + +// LookupGid looks up a group by its group id in /etc/group. If the group cannot +// be found (or there is no /etc/group file on the filesystem), then LookupGid +// returns an error. +func LookupGid(gid int) (Group, error) { + return lookupGid(gid) +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go b/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go new file mode 100644 index 0000000..92b5ae8 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go @@ -0,0 +1,144 @@ +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package user + +import ( + "io" + "os" + "strconv" + + "golang.org/x/sys/unix" +) + +// Unix-specific path to the passwd and group formatted files. +const ( + unixPasswdPath = "/etc/passwd" + unixGroupPath = "/etc/group" +) + +func lookupUser(username string) (User, error) { + return lookupUserFunc(func(u User) bool { + return u.Name == username + }) +} + +func lookupUid(uid int) (User, error) { + return lookupUserFunc(func(u User) bool { + return u.Uid == uid + }) +} + +func lookupUserFunc(filter func(u User) bool) (User, error) { + // Get operating system-specific passwd reader-closer. + passwd, err := GetPasswd() + if err != nil { + return User{}, err + } + defer passwd.Close() + + // Get the users. + users, err := ParsePasswdFilter(passwd, filter) + if err != nil { + return User{}, err + } + + // No user entries found. + if len(users) == 0 { + return User{}, ErrNoPasswdEntries + } + + // Assume the first entry is the "correct" one. + return users[0], nil +} + +func lookupGroup(groupname string) (Group, error) { + return lookupGroupFunc(func(g Group) bool { + return g.Name == groupname + }) +} + +func lookupGid(gid int) (Group, error) { + return lookupGroupFunc(func(g Group) bool { + return g.Gid == gid + }) +} + +func lookupGroupFunc(filter func(g Group) bool) (Group, error) { + // Get operating system-specific group reader-closer. 
+ group, err := GetGroup() + if err != nil { + return Group{}, err + } + defer group.Close() + + // Get the users. + groups, err := ParseGroupFilter(group, filter) + if err != nil { + return Group{}, err + } + + // No user entries found. + if len(groups) == 0 { + return Group{}, ErrNoGroupEntries + } + + // Assume the first entry is the "correct" one. + return groups[0], nil +} + +func GetPasswdPath() (string, error) { + return unixPasswdPath, nil +} + +func GetPasswd() (io.ReadCloser, error) { + return os.Open(unixPasswdPath) +} + +func GetGroupPath() (string, error) { + return unixGroupPath, nil +} + +func GetGroup() (io.ReadCloser, error) { + return os.Open(unixGroupPath) +} + +// CurrentUser looks up the current user by their user id in /etc/passwd. If the +// user cannot be found (or there is no /etc/passwd file on the filesystem), +// then CurrentUser returns an error. +func CurrentUser() (User, error) { + return LookupUid(unix.Getuid()) +} + +// CurrentGroup looks up the current user's group by their primary group id's +// entry in /etc/passwd. If the group cannot be found (or there is no +// /etc/group file on the filesystem), then CurrentGroup returns an error. +func CurrentGroup() (Group, error) { + return LookupGid(unix.Getgid()) +} + +func currentUserSubIDs(fileName string) ([]SubID, error) { + u, err := CurrentUser() + if err != nil { + return nil, err + } + filter := func(entry SubID) bool { + return entry.Name == u.Name || entry.Name == strconv.Itoa(u.Uid) + } + return ParseSubIDFileFilter(fileName, filter) +} + +func CurrentUserSubUIDs() ([]SubID, error) { + return currentUserSubIDs("/etc/subuid") +} + +func CurrentUserSubGIDs() ([]SubID, error) { + return currentUserSubIDs("/etc/subgid") +} + +func CurrentProcessUIDMap() ([]IDMap, error) { + return ParseIDMapFile("/proc/self/uid_map") +} + +func CurrentProcessGIDMap() ([]IDMap, error) { + return ParseIDMapFile("/proc/self/gid_map") +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_windows.go b/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_windows.go new file mode 100644 index 0000000..65cd40e --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_windows.go @@ -0,0 +1,40 @@ +// +build windows + +package user + +import ( + "fmt" + "os/user" +) + +func lookupUser(username string) (User, error) { + u, err := user.Lookup(username) + if err != nil { + return User{}, err + } + return userFromOS(u) +} + +func lookupUid(uid int) (User, error) { + u, err := user.LookupId(fmt.Sprintf("%d", uid)) + if err != nil { + return User{}, err + } + return userFromOS(u) +} + +func lookupGroup(groupname string) (Group, error) { + g, err := user.LookupGroup(groupname) + if err != nil { + return Group{}, err + } + return groupFromOS(g) +} + +func lookupGid(gid int) (Group, error) { + g, err := user.LookupGroupId(fmt.Sprintf("%d", gid)) + if err != nil { + return Group{}, err + } + return groupFromOS(g) +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/user.go b/vendor/github.com/opencontainers/runc/libcontainer/user/user.go new file mode 100644 index 0000000..7b912bb --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/user/user.go @@ -0,0 +1,608 @@ +package user + +import ( + "bufio" + "fmt" + "io" + "os" + "os/user" + "strconv" + "strings" +) + +const ( + minId = 0 + maxId = 1<<31 - 1 //for 32-bit systems compatibility +) + +var ( + ErrRange = fmt.Errorf("uids and gids must be in range %d-%d", minId, maxId) +) + +type User struct { 
+ Name string + Pass string + Uid int + Gid int + Gecos string + Home string + Shell string +} + +// userFromOS converts an os/user.(*User) to local User +// +// (This does not include Pass, Shell or Gecos) +func userFromOS(u *user.User) (User, error) { + newUser := User{ + Name: u.Username, + Home: u.HomeDir, + } + id, err := strconv.Atoi(u.Uid) + if err != nil { + return newUser, err + } + newUser.Uid = id + + id, err = strconv.Atoi(u.Gid) + if err != nil { + return newUser, err + } + newUser.Gid = id + return newUser, nil +} + +type Group struct { + Name string + Pass string + Gid int + List []string +} + +// groupFromOS converts an os/user.(*Group) to local Group +// +// (This does not include Pass, Shell or Gecos) +func groupFromOS(g *user.Group) (Group, error) { + newGroup := Group{ + Name: g.Name, + } + + id, err := strconv.Atoi(g.Gid) + if err != nil { + return newGroup, err + } + newGroup.Gid = id + + return newGroup, nil +} + +// SubID represents an entry in /etc/sub{u,g}id +type SubID struct { + Name string + SubID int64 + Count int64 +} + +// IDMap represents an entry in /proc/PID/{u,g}id_map +type IDMap struct { + ID int64 + ParentID int64 + Count int64 +} + +func parseLine(line string, v ...interface{}) { + parseParts(strings.Split(line, ":"), v...) +} + +func parseParts(parts []string, v ...interface{}) { + if len(parts) == 0 { + return + } + + for i, p := range parts { + // Ignore cases where we don't have enough fields to populate the arguments. + // Some configuration files like to misbehave. + if len(v) <= i { + break + } + + // Use the type of the argument to figure out how to parse it, scanf() style. + // This is legit. + switch e := v[i].(type) { + case *string: + *e = p + case *int: + // "numbers", with conversion errors ignored because of some misbehaving configuration files. + *e, _ = strconv.Atoi(p) + case *int64: + *e, _ = strconv.ParseInt(p, 10, 64) + case *[]string: + // Comma-separated lists. + if p != "" { + *e = strings.Split(p, ",") + } else { + *e = []string{} + } + default: + // Someone goof'd when writing code using this function. Scream so they can hear us. + panic(fmt.Sprintf("parseLine only accepts {*string, *int, *int64, *[]string} as arguments! 
%#v is not a pointer!", e)) + } + } +} + +func ParsePasswdFile(path string) ([]User, error) { + passwd, err := os.Open(path) + if err != nil { + return nil, err + } + defer passwd.Close() + return ParsePasswd(passwd) +} + +func ParsePasswd(passwd io.Reader) ([]User, error) { + return ParsePasswdFilter(passwd, nil) +} + +func ParsePasswdFileFilter(path string, filter func(User) bool) ([]User, error) { + passwd, err := os.Open(path) + if err != nil { + return nil, err + } + defer passwd.Close() + return ParsePasswdFilter(passwd, filter) +} + +func ParsePasswdFilter(r io.Reader, filter func(User) bool) ([]User, error) { + if r == nil { + return nil, fmt.Errorf("nil source for passwd-formatted data") + } + + var ( + s = bufio.NewScanner(r) + out = []User{} + ) + + for s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + + line := strings.TrimSpace(s.Text()) + if line == "" { + continue + } + + // see: man 5 passwd + // name:password:UID:GID:GECOS:directory:shell + // Name:Pass:Uid:Gid:Gecos:Home:Shell + // root:x:0:0:root:/root:/bin/bash + // adm:x:3:4:adm:/var/adm:/bin/false + p := User{} + parseLine(line, &p.Name, &p.Pass, &p.Uid, &p.Gid, &p.Gecos, &p.Home, &p.Shell) + + if filter == nil || filter(p) { + out = append(out, p) + } + } + + return out, nil +} + +func ParseGroupFile(path string) ([]Group, error) { + group, err := os.Open(path) + if err != nil { + return nil, err + } + + defer group.Close() + return ParseGroup(group) +} + +func ParseGroup(group io.Reader) ([]Group, error) { + return ParseGroupFilter(group, nil) +} + +func ParseGroupFileFilter(path string, filter func(Group) bool) ([]Group, error) { + group, err := os.Open(path) + if err != nil { + return nil, err + } + defer group.Close() + return ParseGroupFilter(group, filter) +} + +func ParseGroupFilter(r io.Reader, filter func(Group) bool) ([]Group, error) { + if r == nil { + return nil, fmt.Errorf("nil source for group-formatted data") + } + + var ( + s = bufio.NewScanner(r) + out = []Group{} + ) + + for s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + + text := s.Text() + if text == "" { + continue + } + + // see: man 5 group + // group_name:password:GID:user_list + // Name:Pass:Gid:List + // root:x:0:root + // adm:x:4:root,adm,daemon + p := Group{} + parseLine(text, &p.Name, &p.Pass, &p.Gid, &p.List) + + if filter == nil || filter(p) { + out = append(out, p) + } + } + + return out, nil +} + +type ExecUser struct { + Uid int + Gid int + Sgids []int + Home string +} + +// GetExecUserPath is a wrapper for GetExecUser. It reads data from each of the +// given file paths and uses that data as the arguments to GetExecUser. If the +// files cannot be opened for any reason, the error is ignored and a nil +// io.Reader is passed instead. +func GetExecUserPath(userSpec string, defaults *ExecUser, passwdPath, groupPath string) (*ExecUser, error) { + var passwd, group io.Reader + + if passwdFile, err := os.Open(passwdPath); err == nil { + passwd = passwdFile + defer passwdFile.Close() + } + + if groupFile, err := os.Open(groupPath); err == nil { + group = groupFile + defer groupFile.Close() + } + + return GetExecUser(userSpec, defaults, passwd, group) +} + +// GetExecUser parses a user specification string (using the passwd and group +// readers as sources for /etc/passwd and /etc/group data, respectively). In +// the case of blank fields or missing data from the sources, the values in +// defaults is used. 
+// +// GetExecUser will return an error if a user or group literal could not be +// found in any entry in passwd and group respectively. +// +// Examples of valid user specifications are: +// * "" +// * "user" +// * "uid" +// * "user:group" +// * "uid:gid +// * "user:gid" +// * "uid:group" +// +// It should be noted that if you specify a numeric user or group id, they will +// not be evaluated as usernames (only the metadata will be filled). So attempting +// to parse a user with user.Name = "1337" will produce the user with a UID of +// 1337. +func GetExecUser(userSpec string, defaults *ExecUser, passwd, group io.Reader) (*ExecUser, error) { + if defaults == nil { + defaults = new(ExecUser) + } + + // Copy over defaults. + user := &ExecUser{ + Uid: defaults.Uid, + Gid: defaults.Gid, + Sgids: defaults.Sgids, + Home: defaults.Home, + } + + // Sgids slice *cannot* be nil. + if user.Sgids == nil { + user.Sgids = []int{} + } + + // Allow for userArg to have either "user" syntax, or optionally "user:group" syntax + var userArg, groupArg string + parseLine(userSpec, &userArg, &groupArg) + + // Convert userArg and groupArg to be numeric, so we don't have to execute + // Atoi *twice* for each iteration over lines. + uidArg, uidErr := strconv.Atoi(userArg) + gidArg, gidErr := strconv.Atoi(groupArg) + + // Find the matching user. + users, err := ParsePasswdFilter(passwd, func(u User) bool { + if userArg == "" { + // Default to current state of the user. + return u.Uid == user.Uid + } + + if uidErr == nil { + // If the userArg is numeric, always treat it as a UID. + return uidArg == u.Uid + } + + return u.Name == userArg + }) + + // If we can't find the user, we have to bail. + if err != nil && passwd != nil { + if userArg == "" { + userArg = strconv.Itoa(user.Uid) + } + return nil, fmt.Errorf("unable to find user %s: %v", userArg, err) + } + + var matchedUserName string + if len(users) > 0 { + // First match wins, even if there's more than one matching entry. + matchedUserName = users[0].Name + user.Uid = users[0].Uid + user.Gid = users[0].Gid + user.Home = users[0].Home + } else if userArg != "" { + // If we can't find a user with the given username, the only other valid + // option is if it's a numeric username with no associated entry in passwd. + + if uidErr != nil { + // Not numeric. + return nil, fmt.Errorf("unable to find user %s: %v", userArg, ErrNoPasswdEntries) + } + user.Uid = uidArg + + // Must be inside valid uid range. + if user.Uid < minId || user.Uid > maxId { + return nil, ErrRange + } + + // Okay, so it's numeric. We can just roll with this. + } + + // On to the groups. If we matched a username, we need to do this because of + // the supplementary group IDs. + if groupArg != "" || matchedUserName != "" { + groups, err := ParseGroupFilter(group, func(g Group) bool { + // If the group argument isn't explicit, we'll just search for it. + if groupArg == "" { + // Check if user is a member of this group. + for _, u := range g.List { + if u == matchedUserName { + return true + } + } + return false + } + + if gidErr == nil { + // If the groupArg is numeric, always treat it as a GID. + return gidArg == g.Gid + } + + return g.Name == groupArg + }) + if err != nil && group != nil { + return nil, fmt.Errorf("unable to find groups for spec %v: %v", matchedUserName, err) + } + + // Only start modifying user.Gid if it is in explicit form. + if groupArg != "" { + if len(groups) > 0 { + // First match wins, even if there's more than one matching entry. 
+ user.Gid = groups[0].Gid + } else { + // If we can't find a group with the given name, the only other valid + // option is if it's a numeric group name with no associated entry in group. + + if gidErr != nil { + // Not numeric. + return nil, fmt.Errorf("unable to find group %s: %v", groupArg, ErrNoGroupEntries) + } + user.Gid = gidArg + + // Must be inside valid gid range. + if user.Gid < minId || user.Gid > maxId { + return nil, ErrRange + } + + // Okay, so it's numeric. We can just roll with this. + } + } else if len(groups) > 0 { + // Supplementary group ids only make sense if in the implicit form. + user.Sgids = make([]int, len(groups)) + for i, group := range groups { + user.Sgids[i] = group.Gid + } + } + } + + return user, nil +} + +// GetAdditionalGroups looks up a list of groups by name or group id +// against the given /etc/group formatted data. If a group name cannot +// be found, an error will be returned. If a group id cannot be found, +// or the given group data is nil, the id will be returned as-is +// provided it is in the legal range. +func GetAdditionalGroups(additionalGroups []string, group io.Reader) ([]int, error) { + var groups = []Group{} + if group != nil { + var err error + groups, err = ParseGroupFilter(group, func(g Group) bool { + for _, ag := range additionalGroups { + if g.Name == ag || strconv.Itoa(g.Gid) == ag { + return true + } + } + return false + }) + if err != nil { + return nil, fmt.Errorf("Unable to find additional groups %v: %v", additionalGroups, err) + } + } + + gidMap := make(map[int]struct{}) + for _, ag := range additionalGroups { + var found bool + for _, g := range groups { + // if we found a matched group either by name or gid, take the + // first matched as correct + if g.Name == ag || strconv.Itoa(g.Gid) == ag { + if _, ok := gidMap[g.Gid]; !ok { + gidMap[g.Gid] = struct{}{} + found = true + break + } + } + } + // we asked for a group but didn't find it. let's check to see + // if we wanted a numeric group + if !found { + gid, err := strconv.Atoi(ag) + if err != nil { + return nil, fmt.Errorf("Unable to find group %s", ag) + } + // Ensure gid is inside gid range. + if gid < minId || gid > maxId { + return nil, ErrRange + } + gidMap[gid] = struct{}{} + } + } + gids := []int{} + for gid := range gidMap { + gids = append(gids, gid) + } + return gids, nil +} + +// GetAdditionalGroupsPath is a wrapper around GetAdditionalGroups +// that opens the groupPath given and gives it as an argument to +// GetAdditionalGroups. 
+func GetAdditionalGroupsPath(additionalGroups []string, groupPath string) ([]int, error) { + var group io.Reader + + if groupFile, err := os.Open(groupPath); err == nil { + group = groupFile + defer groupFile.Close() + } + return GetAdditionalGroups(additionalGroups, group) +} + +func ParseSubIDFile(path string) ([]SubID, error) { + subid, err := os.Open(path) + if err != nil { + return nil, err + } + defer subid.Close() + return ParseSubID(subid) +} + +func ParseSubID(subid io.Reader) ([]SubID, error) { + return ParseSubIDFilter(subid, nil) +} + +func ParseSubIDFileFilter(path string, filter func(SubID) bool) ([]SubID, error) { + subid, err := os.Open(path) + if err != nil { + return nil, err + } + defer subid.Close() + return ParseSubIDFilter(subid, filter) +} + +func ParseSubIDFilter(r io.Reader, filter func(SubID) bool) ([]SubID, error) { + if r == nil { + return nil, fmt.Errorf("nil source for subid-formatted data") + } + + var ( + s = bufio.NewScanner(r) + out = []SubID{} + ) + + for s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + + line := strings.TrimSpace(s.Text()) + if line == "" { + continue + } + + // see: man 5 subuid + p := SubID{} + parseLine(line, &p.Name, &p.SubID, &p.Count) + + if filter == nil || filter(p) { + out = append(out, p) + } + } + + return out, nil +} + +func ParseIDMapFile(path string) ([]IDMap, error) { + r, err := os.Open(path) + if err != nil { + return nil, err + } + defer r.Close() + return ParseIDMap(r) +} + +func ParseIDMap(r io.Reader) ([]IDMap, error) { + return ParseIDMapFilter(r, nil) +} + +func ParseIDMapFileFilter(path string, filter func(IDMap) bool) ([]IDMap, error) { + r, err := os.Open(path) + if err != nil { + return nil, err + } + defer r.Close() + return ParseIDMapFilter(r, filter) +} + +func ParseIDMapFilter(r io.Reader, filter func(IDMap) bool) ([]IDMap, error) { + if r == nil { + return nil, fmt.Errorf("nil source for idmap-formatted data") + } + + var ( + s = bufio.NewScanner(r) + out = []IDMap{} + ) + + for s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + + line := strings.TrimSpace(s.Text()) + if line == "" { + continue + } + + // see: man 7 user_namespaces + p := IDMap{} + parseParts(strings.Fields(line), &p.ID, &p.ParentID, &p.Count) + + if filter == nil || filter(p) { + out = append(out, p) + } + } + + return out, nil +} diff --git a/vendor/github.com/opencontainers/runc/vendor.conf b/vendor/github.com/opencontainers/runc/vendor.conf new file mode 100644 index 0000000..fadbe07 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/vendor.conf @@ -0,0 +1,25 @@ +# OCI runtime-spec. When updating this, make sure you use a version tag rather +# than a commit ID so it's much more obvious what version of the spec we are +# using. +github.com/opencontainers/runtime-spec 5684b8af48c1ac3b1451fa499724e30e3c20a294 +# Core libcontainer functionality. +github.com/mrunalp/fileutils ed869b029674c0e9ce4c0dfa781405c2d9946d08 +github.com/opencontainers/selinux v1.0.0-rc1 +github.com/seccomp/libseccomp-golang 84e90a91acea0f4e51e62bc1a75de18b1fc0790f +github.com/sirupsen/logrus a3f95b5c423586578a4e099b11a46c2479628cac +github.com/syndtr/gocapability db04d3cc01c8b54962a58ec7e491717d06cfcc16 +github.com/vishvananda/netlink 1e2e08e8a2dcdacaae3f14ac44c5cfa31361f270 +# systemd integration. +github.com/coreos/go-systemd v14 +github.com/coreos/pkg v3 +github.com/godbus/dbus v3 +github.com/golang/protobuf 18c9bb3261723cd5401db4d0c9fbc5c3b6c70fe8 +# Command-line interface. 
+github.com/cyphar/filepath-securejoin v0.2.1 +github.com/docker/go-units v0.2.0 +github.com/urfave/cli d53eb991652b1d438abdd34ce4bfa3ef1539108e +golang.org/x/sys 7ddbeae9ae08c6a06a59597f0c9edbc5ff2444ce https://github.com/golang/sys + +# console dependencies +github.com/containerd/console 2748ece16665b45a47f884001d5831ec79703880 +github.com/pkg/errors v0.8.0 diff --git a/vendor/github.com/opencontainers/runtime-spec/LICENSE b/vendor/github.com/opencontainers/runtime-spec/LICENSE new file mode 100644 index 0000000..bdc4036 --- /dev/null +++ b/vendor/github.com/opencontainers/runtime-spec/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 The Linux Foundation. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/opencontainers/runtime-spec/README.md b/vendor/github.com/opencontainers/runtime-spec/README.md new file mode 100644 index 0000000..2f7eb60 --- /dev/null +++ b/vendor/github.com/opencontainers/runtime-spec/README.md @@ -0,0 +1,158 @@ +# Open Container Initiative Runtime Specification + +The [Open Container Initiative][oci] develops specifications for standards on Operating System process and application containers. + +The specification can be found [here](spec.md). + +## Table of Contents + +Additional documentation about how this group operates: + +- [Code of Conduct][code-of-conduct] +- [Style and Conventions](style.md) +- [Implementations](implementations.md) +- [Releases](RELEASES.md) +- [project](project.md) +- [charter][charter] + +## Use Cases + +To provide context for users the following section gives example use cases for each part of the spec. + +### Application Bundle Builders + +Application bundle builders can create a [bundle](bundle.md) directory that includes all of the files required for launching an application as a container. +The bundle contains an OCI [configuration file](config.md) where the builder can specify host-independent details such as [which executable to launch](config.md#process) and host-specific settings such as [mount](config.md#mounts) locations, [hook](config.md#hooks) paths, Linux [namespaces](config-linux.md#namespaces) and [cgroups](config-linux.md#control-groups). +Because the configuration includes host-specific settings, application bundle directories copied between two hosts may require configuration adjustments. + +### Hook Developers + +[Hook](config.md#hooks) developers can extend the functionality of an OCI-compliant runtime by hooking into a container's lifecycle with an external application. +Example use cases include sophisticated network configuration, volume garbage collection, etc. + +### Runtime Developers + +Runtime developers can build runtime implementations that run OCI-compliant bundles and container configuration, containing low-level OS and host-specific details, on a particular platform. + +## Contributing + +Development happens on GitHub for the spec. +Issues are used for bugs and actionable items and longer discussions can happen on the [mailing list](#mailing-list). + +The specification and code is licensed under the Apache 2.0 license found in the [LICENSE](./LICENSE) file. + +### Discuss your design + +The project welcomes submissions, but please let everyone know what you are working on. + +Before undertaking a nontrivial change to this specification, send mail to the [mailing list](#mailing-list) to discuss what you plan to do. +This gives everyone a chance to validate the design, helps prevent duplication of effort, and ensures that the idea fits. +It also guarantees that the design is sound before code is written; a GitHub pull-request is not the place for high-level discussions. + +Typos and grammatical errors can go straight to a pull-request. 
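As a brief editorial aside before the contributing details continue: the "Application Bundle Builders" use case above boils down to emitting a valid `config.json` at the bundle root. The sketch below shows one way to do that with the `specs-go` types vendored later in this diff; it is illustrative only, and the bundle path (`/tmp/mybundle`), rootfs layout, and command are assumptions, not part of this change.

```go
package main

import (
	"encoding/json"
	"io/ioutil"
	"os"
	"path/filepath"

	specs "github.com/opencontainers/runtime-spec/specs-go"
)

func main() {
	// A minimal, host-independent spec: which binary to run, as which
	// user, against a read-only rootfs. Host-specific mounts, hooks,
	// namespaces, and cgroups would be layered on top of this.
	spec := specs.Spec{
		Version:  specs.Version, // "1.0.0" per specs-go/version.go
		Hostname: "example",
		Root:     &specs.Root{Path: "rootfs", Readonly: true},
		Process: &specs.Process{
			User: specs.User{UID: 0, GID: 0},
			Args: []string{"/bin/sh", "-c", "echo hello"},
			Env:  []string{"PATH=/usr/sbin:/usr/bin:/sbin:/bin"},
			Cwd:  "/",
		},
	}

	data, err := json.MarshalIndent(&spec, "", "  ")
	if err != nil {
		panic(err)
	}

	// config.json lives at the bundle root, next to the rootfs directory;
	// "/tmp/mybundle" is a hypothetical path used only for illustration.
	bundle := "/tmp/mybundle"
	if err := os.MkdirAll(bundle, 0755); err != nil {
		panic(err)
	}
	if err := ioutil.WriteFile(filepath.Join(bundle, "config.json"), data, 0644); err != nil {
		panic(err)
	}
}
```

An OCI runtime pointed at that directory would then consume the file; that workflow is outside the scope of this diff.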
+When in doubt, start on the [mailing-list](#mailing-list). + +### Weekly Call + +The contributors and maintainers of all OCI projects have a weekly meeting on Wednesdays at: + +* 8:00 AM (USA Pacific), during [odd weeks][iso-week]. +* 2:00 PM (USA Pacific), during [even weeks][iso-week]. + +There is an [iCalendar][rfc5545] format for the meetings [here](meeting.ics). + +Everyone is welcome to participate via [UberConference web][uberconference] or audio-only: +1 415 968 0849 (no PIN needed). +An initial agenda will be posted to the [mailing list](#mailing-list) earlier in the week, and everyone is welcome to propose additional topics or suggest other agenda alterations there. +Minutes are posted to the [mailing list](#mailing-list) and minutes from past calls are archived [here][minutes], with minutes from especially old meetings (September 2015 and earlier) archived [here][runtime-wiki]. + +### Mailing List + +You can subscribe and join the mailing list on [Google Groups][dev-list]. + +### IRC + +OCI discussion happens on #opencontainers on Freenode ([logs][irc-logs]). + +### Git commit + +#### Sign your work + +The sign-off is a simple line at the end of the explanation for the patch, which certifies that you wrote it or otherwise have the right to pass it on as an open-source patch. +The rules are pretty simple: if you can certify the below (from http://developercertificate.org): + +``` +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +660 York Street, Suite 102, +San Francisco, CA 94110 USA + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. +``` + +then you just add a line to every git commit message: + + Signed-off-by: Joe Smith <joe.smith@email.com> + +using your real name (sorry, no pseudonyms or anonymous contributions.) + +You can add the sign off when creating the git commit via `git commit -s`. + +#### Commit Style + +Simple house-keeping for clean git history. +Read more on [How to Write a Git Commit Message][how-to-git-commit] or the Discussion section of [git-commit(1)][git-commit.1]. + +1. Separate the subject from body with a blank line +2. Limit the subject line to 50 characters +3. Capitalize the subject line +4. Do not end the subject line with a period +5. Use the imperative mood in the subject line +6. Wrap the body at 72 characters +7.
Use the body to explain what and why vs. how + * If there was important/useful/essential conversation or information, copy or include a reference +8. When possible, one keyword to scope the change in the subject (i.e. "README: ...", "runtime: ...") + + +[charter]: https://www.opencontainers.org/about/governance +[code-of-conduct]: https://github.com/opencontainers/tob/blob/master/code-of-conduct.md +[dev-list]: https://groups.google.com/a/opencontainers.org/forum/#!forum/dev +[how-to-git-commit]: http://chris.beams.io/posts/git-commit +[irc-logs]: http://ircbot.wl.linuxfoundation.org/eavesdrop/%23opencontainers/ +[iso-week]: https://en.wikipedia.org/wiki/ISO_week_date#Calculating_the_week_number_of_a_given_date +[minutes]: http://ircbot.wl.linuxfoundation.org/meetings/opencontainers/ +[oci]: https://www.opencontainers.org +[rfc5545]: https://tools.ietf.org/html/rfc5545 +[runtime-wiki]: https://github.com/opencontainers/runtime-spec/wiki +[uberconference]: https://www.uberconference.com/opencontainers + +[git-commit.1]: http://git-scm.com/docs/git-commit diff --git a/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go b/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go new file mode 100644 index 0000000..f3f37d4 --- /dev/null +++ b/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go @@ -0,0 +1,570 @@ +package specs + +import "os" + +// Spec is the base configuration for the container. +type Spec struct { + // Version of the Open Container Runtime Specification with which the bundle complies. + Version string `json:"ociVersion"` + // Process configures the container process. + Process *Process `json:"process,omitempty"` + // Root configures the container's root filesystem. + Root *Root `json:"root,omitempty"` + // Hostname configures the container's hostname. + Hostname string `json:"hostname,omitempty"` + // Mounts configures additional mounts (on top of Root). + Mounts []Mount `json:"mounts,omitempty"` + // Hooks configures callbacks for container lifecycle events. + Hooks *Hooks `json:"hooks,omitempty" platform:"linux,solaris"` + // Annotations contains arbitrary metadata for the container. + Annotations map[string]string `json:"annotations,omitempty"` + + // Linux is platform-specific configuration for Linux based containers. + Linux *Linux `json:"linux,omitempty" platform:"linux"` + // Solaris is platform-specific configuration for Solaris based containers. + Solaris *Solaris `json:"solaris,omitempty" platform:"solaris"` + // Windows is platform-specific configuration for Windows based containers. + Windows *Windows `json:"windows,omitempty" platform:"windows"` +} + +// Process contains information to start a specific application inside the container. +type Process struct { + // Terminal creates an interactive terminal for the container. + Terminal bool `json:"terminal,omitempty"` + // ConsoleSize specifies the size of the console. + ConsoleSize *Box `json:"consoleSize,omitempty"` + // User specifies user information for the process. + User User `json:"user"` + // Args specifies the binary and arguments for the application to execute. + Args []string `json:"args"` + // Env populates the process environment for the process. + Env []string `json:"env,omitempty"` + // Cwd is the current working directory for the process and must be + // relative to the container's root. + Cwd string `json:"cwd"` + // Capabilities are Linux capabilities that are kept for the process. 
+ Capabilities *LinuxCapabilities `json:"capabilities,omitempty" platform:"linux"` + // Rlimits specifies rlimit options to apply to the process. + Rlimits []POSIXRlimit `json:"rlimits,omitempty" platform:"linux,solaris"` + // NoNewPrivileges controls whether additional privileges could be gained by processes in the container. + NoNewPrivileges bool `json:"noNewPrivileges,omitempty" platform:"linux"` + // ApparmorProfile specifies the apparmor profile for the container. + ApparmorProfile string `json:"apparmorProfile,omitempty" platform:"linux"` + // Specify an oom_score_adj for the container. + OOMScoreAdj *int `json:"oomScoreAdj,omitempty" platform:"linux"` + // SelinuxLabel specifies the selinux context that the container process is run as. + SelinuxLabel string `json:"selinuxLabel,omitempty" platform:"linux"` +} + +// LinuxCapabilities specifies the whitelist of capabilities that are kept for a process. +// http://man7.org/linux/man-pages/man7/capabilities.7.html +type LinuxCapabilities struct { + // Bounding is the set of capabilities checked by the kernel. + Bounding []string `json:"bounding,omitempty" platform:"linux"` + // Effective is the set of capabilities checked by the kernel. + Effective []string `json:"effective,omitempty" platform:"linux"` + // Inheritable is the capabilities preserved across execve. + Inheritable []string `json:"inheritable,omitempty" platform:"linux"` + // Permitted is the limiting superset for effective capabilities. + Permitted []string `json:"permitted,omitempty" platform:"linux"` + // Ambient is the ambient set of capabilities that are kept. + Ambient []string `json:"ambient,omitempty" platform:"linux"` +} + +// Box specifies dimensions of a rectangle. Used for specifying the size of a console. +type Box struct { + // Height is the vertical dimension of a box. + Height uint `json:"height"` + // Width is the horizontal dimension of a box. + Width uint `json:"width"` +} + +// User specifies specific user (and group) information for the container process. +type User struct { + // UID is the user id. + UID uint32 `json:"uid" platform:"linux,solaris"` + // GID is the group id. + GID uint32 `json:"gid" platform:"linux,solaris"` + // AdditionalGids are additional group ids set for the container's process. + AdditionalGids []uint32 `json:"additionalGids,omitempty" platform:"linux,solaris"` + // Username is the user name. + Username string `json:"username,omitempty" platform:"windows"` +} + +// Root contains information about the container's root filesystem on the host. +type Root struct { + // Path is the absolute path to the container's root filesystem. + Path string `json:"path"` + // Readonly makes the root filesystem for the container readonly before the process is executed. + Readonly bool `json:"readonly,omitempty"` +} + +// Mount specifies a mount for a container. +type Mount struct { + // Destination is the absolute path where the mount will be placed in the container. + Destination string `json:"destination"` + // Type specifies the mount kind. + Type string `json:"type,omitempty" platform:"linux,solaris"` + // Source specifies the source path of the mount. + Source string `json:"source,omitempty"` + // Options are fstab style mount options. 
+ Options []string `json:"options,omitempty"` +} + +// Hook specifies a command that is run at a particular event in the lifecycle of a container +type Hook struct { + Path string `json:"path"` + Args []string `json:"args,omitempty"` + Env []string `json:"env,omitempty"` + Timeout *int `json:"timeout,omitempty"` +} + +// Hooks for container setup and teardown +type Hooks struct { + // Prestart is a list of hooks to be run before the container process is executed. + Prestart []Hook `json:"prestart,omitempty"` + // Poststart is a list of hooks to be run after the container process is started. + Poststart []Hook `json:"poststart,omitempty"` + // Poststop is a list of hooks to be run after the container process exits. + Poststop []Hook `json:"poststop,omitempty"` +} + +// Linux contains platform-specific configuration for Linux based containers. +type Linux struct { + // UIDMapping specifies user mappings for supporting user namespaces. + UIDMappings []LinuxIDMapping `json:"uidMappings,omitempty"` + // GIDMapping specifies group mappings for supporting user namespaces. + GIDMappings []LinuxIDMapping `json:"gidMappings,omitempty"` + // Sysctl are a set of key value pairs that are set for the container on start + Sysctl map[string]string `json:"sysctl,omitempty"` + // Resources contain cgroup information for handling resource constraints + // for the container + Resources *LinuxResources `json:"resources,omitempty"` + // CgroupsPath specifies the path to cgroups that are created and/or joined by the container. + // The path is expected to be relative to the cgroups mountpoint. + // If resources are specified, the cgroups at CgroupsPath will be updated based on resources. + CgroupsPath string `json:"cgroupsPath,omitempty"` + // Namespaces contains the namespaces that are created and/or joined by the container + Namespaces []LinuxNamespace `json:"namespaces,omitempty"` + // Devices are a list of device nodes that are created for the container + Devices []LinuxDevice `json:"devices,omitempty"` + // Seccomp specifies the seccomp security settings for the container. + Seccomp *LinuxSeccomp `json:"seccomp,omitempty"` + // RootfsPropagation is the rootfs mount propagation mode for the container. + RootfsPropagation string `json:"rootfsPropagation,omitempty"` + // MaskedPaths masks over the provided paths inside the container. + MaskedPaths []string `json:"maskedPaths,omitempty"` + // ReadonlyPaths sets the provided paths as RO inside the container. + ReadonlyPaths []string `json:"readonlyPaths,omitempty"` + // MountLabel specifies the selinux context for the mounts in the container. 
+ MountLabel string `json:"mountLabel,omitempty"` + // IntelRdt contains Intel Resource Director Technology (RDT) information + // for handling resource constraints (e.g., L3 cache) for the container + IntelRdt *LinuxIntelRdt `json:"intelRdt,omitempty"` +} + +// LinuxNamespace is the configuration for a Linux namespace +type LinuxNamespace struct { + // Type is the type of namespace + Type LinuxNamespaceType `json:"type"` + // Path is a path to an existing namespace persisted on disk that can be joined + // and is of the same type + Path string `json:"path,omitempty"` +} + +// LinuxNamespaceType is one of the Linux namespaces +type LinuxNamespaceType string + +const ( + // PIDNamespace for isolating process IDs + PIDNamespace LinuxNamespaceType = "pid" + // NetworkNamespace for isolating network devices, stacks, ports, etc + NetworkNamespace = "network" + // MountNamespace for isolating mount points + MountNamespace = "mount" + // IPCNamespace for isolating System V IPC, POSIX message queues + IPCNamespace = "ipc" + // UTSNamespace for isolating hostname and NIS domain name + UTSNamespace = "uts" + // UserNamespace for isolating user and group IDs + UserNamespace = "user" + // CgroupNamespace for isolating cgroup hierarchies + CgroupNamespace = "cgroup" +) + +// LinuxIDMapping specifies UID/GID mappings +type LinuxIDMapping struct { + // HostID is the starting UID/GID on the host to be mapped to 'ContainerID' + HostID uint32 `json:"hostID"` + // ContainerID is the starting UID/GID in the container + ContainerID uint32 `json:"containerID"` + // Size is the number of IDs to be mapped + Size uint32 `json:"size"` +} + +// POSIXRlimit type and restrictions +type POSIXRlimit struct { + // Type of the rlimit to set + Type string `json:"type"` + // Hard is the hard limit for the specified type + Hard uint64 `json:"hard"` + // Soft is the soft limit for the specified type + Soft uint64 `json:"soft"` +} + +// LinuxHugepageLimit structure corresponds to limiting kernel hugepages +type LinuxHugepageLimit struct { + // Pagesize is the hugepage size + Pagesize string `json:"pageSize"` + // Limit is the limit of "hugepagesize" hugetlb usage + Limit uint64 `json:"limit"` +} + +// LinuxInterfacePriority for network interfaces +type LinuxInterfacePriority struct { + // Name is the name of the network interface + Name string `json:"name"` + // Priority for the interface + Priority uint32 `json:"priority"` +} + +// linuxBlockIODevice holds major:minor format supported in blkio cgroup +type linuxBlockIODevice struct { + // Major is the device's major number. + Major int64 `json:"major"` + // Minor is the device's minor number. + Minor int64 `json:"minor"` +} + +// LinuxWeightDevice struct holds a `major:minor weight` pair for weightDevice +type LinuxWeightDevice struct { + linuxBlockIODevice + // Weight is the bandwidth rate for the device. 
+ Weight *uint16 `json:"weight,omitempty"` + // LeafWeight is the bandwidth rate for the device while competing with the cgroup's child cgroups, CFQ scheduler only + LeafWeight *uint16 `json:"leafWeight,omitempty"` +} + +// LinuxThrottleDevice struct holds a `major:minor rate_per_second` pair +type LinuxThrottleDevice struct { + linuxBlockIODevice + // Rate is the IO rate limit per cgroup per device + Rate uint64 `json:"rate"` +} + +// LinuxBlockIO for Linux cgroup 'blkio' resource management +type LinuxBlockIO struct { + // Specifies per cgroup weight + Weight *uint16 `json:"weight,omitempty"` + // Specifies tasks' weight in the given cgroup while competing with the cgroup's child cgroups, CFQ scheduler only + LeafWeight *uint16 `json:"leafWeight,omitempty"` + // Weight per cgroup per device, can override BlkioWeight + WeightDevice []LinuxWeightDevice `json:"weightDevice,omitempty"` + // IO read rate limit per cgroup per device, bytes per second + ThrottleReadBpsDevice []LinuxThrottleDevice `json:"throttleReadBpsDevice,omitempty"` + // IO write rate limit per cgroup per device, bytes per second + ThrottleWriteBpsDevice []LinuxThrottleDevice `json:"throttleWriteBpsDevice,omitempty"` + // IO read rate limit per cgroup per device, IO per second + ThrottleReadIOPSDevice []LinuxThrottleDevice `json:"throttleReadIOPSDevice,omitempty"` + // IO write rate limit per cgroup per device, IO per second + ThrottleWriteIOPSDevice []LinuxThrottleDevice `json:"throttleWriteIOPSDevice,omitempty"` +} + +// LinuxMemory for Linux cgroup 'memory' resource management +type LinuxMemory struct { + // Memory limit (in bytes). + Limit *int64 `json:"limit,omitempty"` + // Memory reservation or soft_limit (in bytes). + Reservation *int64 `json:"reservation,omitempty"` + // Total memory limit (memory + swap). + Swap *int64 `json:"swap,omitempty"` + // Kernel memory limit (in bytes). + Kernel *int64 `json:"kernel,omitempty"` + // Kernel memory limit for tcp (in bytes) + KernelTCP *int64 `json:"kernelTCP,omitempty"` + // How aggressive the kernel will swap memory pages. + Swappiness *uint64 `json:"swappiness,omitempty"` + // DisableOOMKiller disables the OOM killer for out of memory conditions + DisableOOMKiller *bool `json:"disableOOMKiller,omitempty"` +} + +// LinuxCPU for Linux cgroup 'cpu' resource management +type LinuxCPU struct { + // CPU shares (relative weight (ratio) vs. other cgroups with cpu shares). + Shares *uint64 `json:"shares,omitempty"` + // CPU hardcap limit (in usecs). Allowed cpu time in a given period. + Quota *int64 `json:"quota,omitempty"` + // CPU period to be used for hardcapping (in usecs). + Period *uint64 `json:"period,omitempty"` + // How much time realtime scheduling may use (in usecs). + RealtimeRuntime *int64 `json:"realtimeRuntime,omitempty"` + // CPU period to be used for realtime scheduling (in usecs). + RealtimePeriod *uint64 `json:"realtimePeriod,omitempty"` + // CPUs to use within the cpuset. Default is to use any CPU available. + Cpus string `json:"cpus,omitempty"` + // List of memory nodes in the cpuset. Default is to use any available memory node. + Mems string `json:"mems,omitempty"` +} + +// LinuxPids for Linux cgroup 'pids' resource management (Linux 4.3) +type LinuxPids struct { + // Maximum number of PIDs. Default is "no limit". 
+ Limit int64 `json:"limit"` +} + +// LinuxNetwork identification and priority configuration +type LinuxNetwork struct { + // Set class identifier for container's network packets + ClassID *uint32 `json:"classID,omitempty"` + // Set priority of network traffic for container + Priorities []LinuxInterfacePriority `json:"priorities,omitempty"` +} + +// LinuxResources has container runtime resource constraints +type LinuxResources struct { + // Devices configures the device whitelist. + Devices []LinuxDeviceCgroup `json:"devices,omitempty"` + // Memory restriction configuration + Memory *LinuxMemory `json:"memory,omitempty"` + // CPU resource restriction configuration + CPU *LinuxCPU `json:"cpu,omitempty"` + // Task resource restriction configuration. + Pids *LinuxPids `json:"pids,omitempty"` + // BlockIO restriction configuration + BlockIO *LinuxBlockIO `json:"blockIO,omitempty"` + // Hugetlb limit (in bytes) + HugepageLimits []LinuxHugepageLimit `json:"hugepageLimits,omitempty"` + // Network restriction configuration + Network *LinuxNetwork `json:"network,omitempty"` +} + +// LinuxDevice represents the mknod information for a Linux special device file +type LinuxDevice struct { + // Path to the device. + Path string `json:"path"` + // Device type, block, char, etc. + Type string `json:"type"` + // Major is the device's major number. + Major int64 `json:"major"` + // Minor is the device's minor number. + Minor int64 `json:"minor"` + // FileMode permission bits for the device. + FileMode *os.FileMode `json:"fileMode,omitempty"` + // UID of the device. + UID *uint32 `json:"uid,omitempty"` + // Gid of the device. + GID *uint32 `json:"gid,omitempty"` +} + +// LinuxDeviceCgroup represents a device rule for the whitelist controller +type LinuxDeviceCgroup struct { + // Allow or deny + Allow bool `json:"allow"` + // Device type, block, char, etc. + Type string `json:"type,omitempty"` + // Major is the device's major number. + Major *int64 `json:"major,omitempty"` + // Minor is the device's minor number. + Minor *int64 `json:"minor,omitempty"` + // Cgroup access permissions format, rwm. + Access string `json:"access,omitempty"` +} + +// Solaris contains platform-specific configuration for Solaris application containers. +type Solaris struct { + // SMF FMRI which should go "online" before we start the container process. + Milestone string `json:"milestone,omitempty"` + // Maximum set of privileges any process in this container can obtain. + LimitPriv string `json:"limitpriv,omitempty"` + // The maximum amount of shared memory allowed for this container. + MaxShmMemory string `json:"maxShmMemory,omitempty"` + // Specification for automatic creation of network resources for this container. + Anet []SolarisAnet `json:"anet,omitempty"` + // Set limit on the amount of CPU time that can be used by container. + CappedCPU *SolarisCappedCPU `json:"cappedCPU,omitempty"` + // The physical and swap caps on the memory that can be used by this container. + CappedMemory *SolarisCappedMemory `json:"cappedMemory,omitempty"` +} + +// SolarisCappedCPU allows users to set limit on the amount of CPU time that can be used by container. +type SolarisCappedCPU struct { + Ncpus string `json:"ncpus,omitempty"` +} + +// SolarisCappedMemory allows users to set the physical and swap caps on the memory that can be used by this container. 
+type SolarisCappedMemory struct { + Physical string `json:"physical,omitempty"` + Swap string `json:"swap,omitempty"` +} + +// SolarisAnet provides the specification for automatic creation of network resources for this container. +type SolarisAnet struct { + // Specify a name for the automatically created VNIC datalink. + Linkname string `json:"linkname,omitempty"` + // Specify the link over which the VNIC will be created. + Lowerlink string `json:"lowerLink,omitempty"` + // The set of IP addresses that the container can use. + Allowedaddr string `json:"allowedAddress,omitempty"` + // Specifies whether allowedAddress limitation is to be applied to the VNIC. + Configallowedaddr string `json:"configureAllowedAddress,omitempty"` + // The value of the optional default router. + Defrouter string `json:"defrouter,omitempty"` + // Enable one or more types of link protection. + Linkprotection string `json:"linkProtection,omitempty"` + // Set the VNIC's macAddress + Macaddress string `json:"macAddress,omitempty"` +} + +// Windows defines the runtime configuration for Windows based containers, including Hyper-V containers. +type Windows struct { + // LayerFolders contains a list of absolute paths to directories containing image layers. + LayerFolders []string `json:"layerFolders"` + // Resources contains information for handling resource constraints for the container. + Resources *WindowsResources `json:"resources,omitempty"` + // CredentialSpec contains a JSON object describing a group Managed Service Account (gMSA) specification. + CredentialSpec interface{} `json:"credentialSpec,omitempty"` + // Servicing indicates if the container is being started in a mode to apply a Windows Update servicing operation. + Servicing bool `json:"servicing,omitempty"` + // IgnoreFlushesDuringBoot indicates if the container is being started in a mode where disk writes are not flushed during its boot process. + IgnoreFlushesDuringBoot bool `json:"ignoreFlushesDuringBoot,omitempty"` + // HyperV contains information for running a container with Hyper-V isolation. + HyperV *WindowsHyperV `json:"hyperv,omitempty"` + // Network restriction configuration. + Network *WindowsNetwork `json:"network,omitempty"` +} + +// WindowsResources has container runtime resource constraints for containers running on Windows. +type WindowsResources struct { + // Memory restriction configuration. + Memory *WindowsMemoryResources `json:"memory,omitempty"` + // CPU resource restriction configuration. + CPU *WindowsCPUResources `json:"cpu,omitempty"` + // Storage restriction configuration. + Storage *WindowsStorageResources `json:"storage,omitempty"` +} + +// WindowsMemoryResources contains memory resource management settings. +type WindowsMemoryResources struct { + // Memory limit in bytes. + Limit *uint64 `json:"limit,omitempty"` +} + +// WindowsCPUResources contains CPU resource management settings. +type WindowsCPUResources struct { + // Number of CPUs available to the container. + Count *uint64 `json:"count,omitempty"` + // CPU shares (relative weight to other containers with cpu shares). + Shares *uint16 `json:"shares,omitempty"` + // Specifies the portion of processor cycles that this container can use as a percentage times 100. + Maximum *uint16 `json:"maximum,omitempty"` +} + +// WindowsStorageResources contains storage resource management settings. +type WindowsStorageResources struct { + // Specifies maximum Iops for the system drive. 
+ Iops *uint64 `json:"iops,omitempty"` + // Specifies maximum bytes per second for the system drive. + Bps *uint64 `json:"bps,omitempty"` + // Sandbox size specifies the minimum size of the system drive in bytes. + SandboxSize *uint64 `json:"sandboxSize,omitempty"` +} + +// WindowsNetwork contains network settings for Windows containers. +type WindowsNetwork struct { + // List of HNS endpoints that the container should connect to. + EndpointList []string `json:"endpointList,omitempty"` + // Specifies if unqualified DNS name resolution is allowed. + AllowUnqualifiedDNSQuery bool `json:"allowUnqualifiedDNSQuery,omitempty"` + // Comma separated list of DNS suffixes to use for name resolution. + DNSSearchList []string `json:"DNSSearchList,omitempty"` + // Name (ID) of the container that we will share with the network stack. + NetworkSharedContainerName string `json:"networkSharedContainerName,omitempty"` +} + +// WindowsHyperV contains information for configuring a container to run with Hyper-V isolation. +type WindowsHyperV struct { + // UtilityVMPath is an optional path to the image used for the Utility VM. + UtilityVMPath string `json:"utilityVMPath,omitempty"` +} + +// LinuxSeccomp represents syscall restrictions +type LinuxSeccomp struct { + DefaultAction LinuxSeccompAction `json:"defaultAction"` + Architectures []Arch `json:"architectures,omitempty"` + Syscalls []LinuxSyscall `json:"syscalls,omitempty"` +} + +// Arch used for additional architectures +type Arch string + +// Additional architectures permitted to be used for system calls +// By default only the native architecture of the kernel is permitted +const ( + ArchX86 Arch = "SCMP_ARCH_X86" + ArchX86_64 Arch = "SCMP_ARCH_X86_64" + ArchX32 Arch = "SCMP_ARCH_X32" + ArchARM Arch = "SCMP_ARCH_ARM" + ArchAARCH64 Arch = "SCMP_ARCH_AARCH64" + ArchMIPS Arch = "SCMP_ARCH_MIPS" + ArchMIPS64 Arch = "SCMP_ARCH_MIPS64" + ArchMIPS64N32 Arch = "SCMP_ARCH_MIPS64N32" + ArchMIPSEL Arch = "SCMP_ARCH_MIPSEL" + ArchMIPSEL64 Arch = "SCMP_ARCH_MIPSEL64" + ArchMIPSEL64N32 Arch = "SCMP_ARCH_MIPSEL64N32" + ArchPPC Arch = "SCMP_ARCH_PPC" + ArchPPC64 Arch = "SCMP_ARCH_PPC64" + ArchPPC64LE Arch = "SCMP_ARCH_PPC64LE" + ArchS390 Arch = "SCMP_ARCH_S390" + ArchS390X Arch = "SCMP_ARCH_S390X" + ArchPARISC Arch = "SCMP_ARCH_PARISC" + ArchPARISC64 Arch = "SCMP_ARCH_PARISC64" +) + +// LinuxSeccompAction taken upon Seccomp rule match +type LinuxSeccompAction string + +// Define actions for Seccomp rules +const ( + ActKill LinuxSeccompAction = "SCMP_ACT_KILL" + ActTrap LinuxSeccompAction = "SCMP_ACT_TRAP" + ActErrno LinuxSeccompAction = "SCMP_ACT_ERRNO" + ActTrace LinuxSeccompAction = "SCMP_ACT_TRACE" + ActAllow LinuxSeccompAction = "SCMP_ACT_ALLOW" +) + +// LinuxSeccompOperator used to match syscall arguments in Seccomp +type LinuxSeccompOperator string + +// Define operators for syscall arguments in Seccomp +const ( + OpNotEqual LinuxSeccompOperator = "SCMP_CMP_NE" + OpLessThan LinuxSeccompOperator = "SCMP_CMP_LT" + OpLessEqual LinuxSeccompOperator = "SCMP_CMP_LE" + OpEqualTo LinuxSeccompOperator = "SCMP_CMP_EQ" + OpGreaterEqual LinuxSeccompOperator = "SCMP_CMP_GE" + OpGreaterThan LinuxSeccompOperator = "SCMP_CMP_GT" + OpMaskedEqual LinuxSeccompOperator = "SCMP_CMP_MASKED_EQ" +) + +// LinuxSeccompArg used for matching specific syscall arguments in Seccomp +type LinuxSeccompArg struct { + Index uint `json:"index"` + Value uint64 `json:"value"` + ValueTwo uint64 `json:"valueTwo,omitempty"` + Op LinuxSeccompOperator `json:"op"` +} + +// LinuxSyscall is used to match a 
syscall in Seccomp +type LinuxSyscall struct { + Names []string `json:"names"` + Action LinuxSeccompAction `json:"action"` + Args []LinuxSeccompArg `json:"args,omitempty"` +} + +// LinuxIntelRdt has container runtime resource constraints +// for Intel RDT/CAT, which was introduced in the Linux 4.10 kernel +type LinuxIntelRdt struct { + // The schema for L3 cache id and capacity bitmask (CBM) + // Format: "L3:<cache_id0>=<cbm0>;<cache_id1>=<cbm1>;..." + L3CacheSchema string `json:"l3CacheSchema,omitempty"` +} diff --git a/vendor/github.com/opencontainers/runtime-spec/specs-go/state.go b/vendor/github.com/opencontainers/runtime-spec/specs-go/state.go new file mode 100644 index 0000000..89dce34 --- /dev/null +++ b/vendor/github.com/opencontainers/runtime-spec/specs-go/state.go @@ -0,0 +1,17 @@ +package specs + +// State holds information about the runtime state of the container. +type State struct { + // Version is the version of the specification that is supported. + Version string `json:"ociVersion"` + // ID is the container ID. + ID string `json:"id"` + // Status is the runtime status of the container. + Status string `json:"status"` + // Pid is the process ID for the container process. + Pid int `json:"pid,omitempty"` + // Bundle is the path to the container's bundle directory. + Bundle string `json:"bundle"` + // Annotations are key values associated with the container. + Annotations map[string]string `json:"annotations,omitempty"` +} diff --git a/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go b/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go new file mode 100644 index 0000000..926ce66 --- /dev/null +++ b/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go @@ -0,0 +1,18 @@ +package specs + +import "fmt" + +const ( + // VersionMajor is for API-incompatible changes + VersionMajor = 1 + // VersionMinor is for functionality added in a backwards-compatible manner + VersionMinor = 0 + // VersionPatch is for backwards-compatible bug fixes + VersionPatch = 0 + + // VersionDev indicates development branch. Releases will be empty string. + VersionDev = "" +) + +// Version is the specification version that the package types support. +var Version = fmt.Sprintf("%d.%d.%d%s", VersionMajor, VersionMinor, VersionPatch, VersionDev) diff --git a/vendor/github.com/opencontainers/selinux/LICENSE b/vendor/github.com/opencontainers/selinux/LICENSE new file mode 100644 index 0000000..8dada3e --- /dev/null +++ b/vendor/github.com/opencontainers/selinux/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License.
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
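Before the vendored selinux package itself, a short hedged sketch of how its `label` API (added in the files below) is typically consumed by a container runtime. Only calls defined in the vendored code are used (`InitLabels`, `ReleaseLabel`, `FormatMountLabel`); the `nil` options and the `"defaults"` mount string are illustrative assumptions. Builds without the `selinux` tag fall back to the no-op stubs, and at runtime `InitLabels` returns empty labels when SELinux is disabled.

```go
package main

import (
	"fmt"

	"github.com/opencontainers/selinux/go-selinux/label"
)

func main() {
	// InitLabels allocates a process/mount label pair whose MCS component
	// is guaranteed to be unique; passing options such as "disable" or
	// "level:s0:c1,c2" would alter the result.
	processLabel, mountLabel, err := label.InitLabels(nil)
	if err != nil {
		panic(err)
	}
	// Return the reserved MCS pair when the container goes away.
	defer label.ReleaseLabel(processLabel)

	// FormatMountLabel folds the mount label into mount options as a
	// context= flag; with an empty mountLabel it returns src unchanged.
	fmt.Println(label.FormatMountLabel("defaults", mountLabel))
	fmt.Println("process label:", processLabel)
}
```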
diff --git a/vendor/github.com/opencontainers/selinux/README.md b/vendor/github.com/opencontainers/selinux/README.md new file mode 100644 index 0000000..043a929 --- /dev/null +++ b/vendor/github.com/opencontainers/selinux/README.md @@ -0,0 +1,7 @@ +# selinux + +[![GoDoc](https://godoc.org/github.com/opencontainers/selinux?status.svg)](https://godoc.org/github.com/opencontainers/selinux) [![Go Report Card](https://goreportcard.com/badge/github.com/opencontainers/selinux)](https://goreportcard.com/report/github.com/opencontainers/selinux) [![Build Status](https://travis-ci.org/opencontainers/selinux.svg?branch=master)](https://travis-ci.org/opencontainers/selinux) + +Common SELinux package used across the container ecosystem. + +Please see the [godoc](https://godoc.org/github.com/opencontainers/selinux) for more information. diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/label/label.go b/vendor/github.com/opencontainers/selinux/go-selinux/label/label.go new file mode 100644 index 0000000..4e9a8c5 --- /dev/null +++ b/vendor/github.com/opencontainers/selinux/go-selinux/label/label.go @@ -0,0 +1,101 @@ +// +build !selinux !linux + +package label + +// InitLabels returns the process label and file labels to be used within +// the container. A list of options can be passed into this function to alter +// the labels. +func InitLabels(options []string) (string, string, error) { + return "", "", nil +} + +func ROMountLabel() string { + return "" +} + +func GenLabels(options string) (string, string, error) { + return "", "", nil +} + +func FormatMountLabel(src string, mountLabel string) string { + return src +} + +func SetProcessLabel(processLabel string) error { + return nil +} + +func ProcessLabel() (string, error) { + return "", nil +} + +func SetSocketLabel(processLabel string) error { + return nil +} + +func SocketLabel() (string, error) { + return "", nil +} + +func FileLabel(path string) (string, error) { + return "", nil +} + +func SetFileLabel(path string, fileLabel string) error { + return nil +} + +func SetFileCreateLabel(fileLabel string) error { + return nil +} + +func Relabel(path string, fileLabel string, shared bool) error { + return nil +} + +func PidLabel(pid int) (string, error) { + return "", nil +} + +func Init() { +} + +// ClearLabels clears all reserved labels +func ClearLabels() { + return +} + +func ReserveLabel(label string) error { + return nil +} + +func ReleaseLabel(label string) error { + return nil +} + +// DupSecOpt takes a process label and returns security options that +// can be used to set duplicate labels on future container processes +func DupSecOpt(src string) ([]string, error) { + return nil, nil +} + +// DisableSecOpt returns a security opt that can disable labeling +// support for future container processes +func DisableSecOpt() []string { + return nil +} + +// Validate checks that the label does not include unexpected options +func Validate(label string) error { + return nil +} + +// RelabelNeeded checks whether the user requested a relabel +func RelabelNeeded(label string) bool { + return false +} + +// IsShared checks that the label includes a "shared" mark +func IsShared(label string) bool { + return false +} diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/label/label_selinux.go b/vendor/github.com/opencontainers/selinux/go-selinux/label/label_selinux.go new file mode 100644 index 0000000..d4e2690 --- /dev/null +++ b/vendor/github.com/opencontainers/selinux/go-selinux/label/label_selinux.go @@ -0,0 +1,276 @@ +// +build 
selinux,linux + +package label + +import ( + "fmt" + "os" + "os/user" + "strings" + + "github.com/opencontainers/selinux/go-selinux" +) + +// Valid Label Options +var validOptions = map[string]bool{ + "disable": true, + "type": true, + "user": true, + "role": true, + "level": true, +} + +var ErrIncompatibleLabel = fmt.Errorf("Bad SELinux option z and Z can not be used together") + +// InitLabels returns the process label and file labels to be used within +// the container. A list of options can be passed into this function to alter +// the labels. The labels returned will include a random MCS String, that is +// guaranteed to be unique. +func InitLabels(options []string) (plabel string, mlabel string, Err error) { + if !selinux.GetEnabled() { + return "", "", nil + } + processLabel, mountLabel := selinux.ContainerLabels() + if processLabel != "" { + defer func() { + if Err != nil { + ReleaseLabel(mountLabel) + } + }() + pcon, err := selinux.NewContext(processLabel) + if err != nil { + return "", "", err + } + + mcon, err := selinux.NewContext(mountLabel) + if err != nil { + return "", "", err + } + for _, opt := range options { + if opt == "disable" { + return "", mountLabel, nil + } + if i := strings.Index(opt, ":"); i == -1 { + return "", "", fmt.Errorf("Bad label option %q, valid options 'disable' or \n'user, role, level, type' followed by ':' and a value", opt) + } + con := strings.SplitN(opt, ":", 2) + if !validOptions[con[0]] { + return "", "", fmt.Errorf("Bad label option %q, valid options 'disable, user, role, level, type'", con[0]) + + } + pcon[con[0]] = con[1] + if con[0] == "level" || con[0] == "user" { + mcon[con[0]] = con[1] + } + } + _ = ReleaseLabel(processLabel) + processLabel = pcon.Get() + mountLabel = mcon.Get() + _ = ReserveLabel(processLabel) + } + return processLabel, mountLabel, nil +} + +func ROMountLabel() string { + return selinux.ROFileLabel() +} + +// DEPRECATED: The GenLabels function is only to be used during the transition to the official API. +func GenLabels(options string) (string, string, error) { + return InitLabels(strings.Fields(options)) +} + +// FormatMountLabel returns a string to be used by the mount command. +// The format of this string will be used to alter the labeling of the mountpoint. +// The string returned is suitable to be used as the options field of the mount command. +// If you need to have additional mount point options, you can pass them in as +// the first parameter. Second parameter is the label that you wish to apply +// to all content in the mount point. +func FormatMountLabel(src, mountLabel string) string { + if mountLabel != "" { + switch src { + case "": + src = fmt.Sprintf("context=%q", mountLabel) + default: + src = fmt.Sprintf("%s,context=%q", src, mountLabel) + } + } + return src +} + +// SetProcessLabel takes a process label and tells the kernel to assign the +// label to the next program executed by the current process. +func SetProcessLabel(processLabel string) error { + return selinux.SetExecLabel(processLabel) +} + +// SetSocketLabel takes a process label and tells the kernel to assign the +// label to the next socket that gets created +func SetSocketLabel(processLabel string) error { + return selinux.SetSocketLabel(processLabel) +} + +// SocketLabel retrieves the current default socket label setting +func SocketLabel() (string, error) { + return selinux.SocketLabel() +} + +// ProcessLabel returns the process label that the kernel will assign +// to the next program executed by the current process. 
If "" is returned +// this indicates that the default labeling will happen for the process. +func ProcessLabel() (string, error) { + return selinux.ExecLabel() +} + +// FileLabel returns the label for specified path +func FileLabel(path string) (string, error) { + return selinux.FileLabel(path) +} + +// SetFileLabel modifies the "path" label to the specified file label +func SetFileLabel(path string, fileLabel string) error { + if selinux.GetEnabled() && fileLabel != "" { + return selinux.SetFileLabel(path, fileLabel) + } + return nil +} + +// SetFileCreateLabel tells the kernel the label for all files to be created +func SetFileCreateLabel(fileLabel string) error { + if selinux.GetEnabled() { + return selinux.SetFSCreateLabel(fileLabel) + } + return nil +} + +// Relabel changes the label of path to the filelabel string. +// It changes the MCS label to s0 if shared is true. +// This will allow all containers to share the content. +func Relabel(path string, fileLabel string, shared bool) error { + if !selinux.GetEnabled() { + return nil + } + + if fileLabel == "" { + return nil + } + + exclude_paths := map[string]bool{ + "/": true, + "/bin": true, + "/boot": true, + "/dev": true, + "/etc": true, + "/etc/passwd": true, + "/etc/pki": true, + "/etc/shadow": true, + "/home": true, + "/lib": true, + "/lib64": true, + "/media": true, + "/opt": true, + "/proc": true, + "/root": true, + "/run": true, + "/sbin": true, + "/srv": true, + "/sys": true, + "/tmp": true, + "/usr": true, + "/var": true, + "/var/lib": true, + "/var/log": true, + } + + if home := os.Getenv("HOME"); home != "" { + exclude_paths[home] = true + } + + if sudoUser := os.Getenv("SUDO_USER"); sudoUser != "" { + if usr, err := user.Lookup(sudoUser); err == nil { + exclude_paths[usr.HomeDir] = true + } + } + + if path != "/" { + path = strings.TrimSuffix(path, "/") + } + if exclude_paths[path] { + return fmt.Errorf("SELinux relabeling of %s is not allowed", path) + } + + if shared { + c, err := selinux.NewContext(fileLabel) + if err != nil { + return err + } + + c["level"] = "s0" + fileLabel = c.Get() + } + if err := selinux.Chcon(path, fileLabel, true); err != nil { + return err + } + return nil +} + +// PidLabel will return the label of the process running with the specified pid +func PidLabel(pid int) (string, error) { + return selinux.PidLabel(pid) +} + +// Init initialises the labeling system +func Init() { + selinux.GetEnabled() +} + +// ClearLabels will clear all reserved labels +func ClearLabels() { + selinux.ClearLabels() +} + +// ReserveLabel will record the fact that the MCS label has already been used. +// This will prevent InitLabels from using the MCS label in a newly created +// container +func ReserveLabel(label string) error { + selinux.ReserveLabel(label) + return nil +} + +// ReleaseLabel will remove the reservation of the MCS label. 
+// This will allow InitLabels to use the MCS label in a newly created +// containers +func ReleaseLabel(label string) error { + selinux.ReleaseLabel(label) + return nil +} + +// DupSecOpt takes a process label and returns security options that +// can be used to set duplicate labels on future container processes +func DupSecOpt(src string) ([]string, error) { + return selinux.DupSecOpt(src) +} + +// DisableSecOpt returns a security opt that can disable labeling +// support for future container processes +func DisableSecOpt() []string { + return selinux.DisableSecOpt() +} + +// Validate checks that the label does not include unexpected options +func Validate(label string) error { + if strings.Contains(label, "z") && strings.Contains(label, "Z") { + return ErrIncompatibleLabel + } + return nil +} + +// RelabelNeeded checks whether the user requested a relabel +func RelabelNeeded(label string) bool { + return strings.Contains(label, "z") || strings.Contains(label, "Z") +} + +// IsShared checks that the label includes a "shared" mark +func IsShared(label string) bool { + return strings.Contains(label, "z") +} diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go new file mode 100644 index 0000000..5adafd3 --- /dev/null +++ b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go @@ -0,0 +1,757 @@ +// +build selinux,linux + +package selinux + +import ( + "bufio" + "bytes" + "crypto/rand" + "encoding/binary" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "regexp" + "strconv" + "strings" + "sync" + "syscall" +) + +const ( + // Enforcing constant indicate SELinux is in enforcing mode + Enforcing = 1 + // Permissive constant to indicate SELinux is in permissive mode + Permissive = 0 + // Disabled constant to indicate SELinux is disabled + Disabled = -1 + + selinuxDir = "/etc/selinux/" + selinuxConfig = selinuxDir + "config" + selinuxfsMount = "/sys/fs/selinux" + selinuxTypeTag = "SELINUXTYPE" + selinuxTag = "SELINUX" + xattrNameSelinux = "security.selinux" + stRdOnly = 0x01 + selinuxfsMagic = 0xf97cff8c +) + +type selinuxState struct { + enabledSet bool + enabled bool + selinuxfsSet bool + selinuxfs string + mcsList map[string]bool + sync.Mutex +} + +var ( + // ErrMCSAlreadyExists is returned when trying to allocate a duplicate MCS. + ErrMCSAlreadyExists = errors.New("MCS label already exists") + // ErrEmptyPath is returned when an empty path has been specified. + ErrEmptyPath = errors.New("empty path") + // InvalidLabel is returned when an invalid label is specified. 
+ InvalidLabel = errors.New("Invalid Label") + + assignRegex = regexp.MustCompile(`^([^=]+)=(.*)$`) + roFileLabel string + state = selinuxState{ + mcsList: make(map[string]bool), + } +) + +// Context is a representation of the SELinux label broken into 4 parts +type Context map[string]string + +func (s *selinuxState) setEnable(enabled bool) bool { + s.Lock() + defer s.Unlock() + s.enabledSet = true + s.enabled = enabled + return s.enabled +} + +func (s *selinuxState) getEnabled() bool { + s.Lock() + enabled := s.enabled + enabledSet := s.enabledSet + s.Unlock() + if enabledSet { + return enabled + } + + enabled = false + if fs := getSelinuxMountPoint(); fs != "" { + if con, _ := CurrentLabel(); con != "kernel" { + enabled = true + } + } + return s.setEnable(enabled) +} + +// SetDisabled disables selinux support for the package +func SetDisabled() { + state.setEnable(false) +} + +func (s *selinuxState) setSELinuxfs(selinuxfs string) string { + s.Lock() + defer s.Unlock() + s.selinuxfsSet = true + s.selinuxfs = selinuxfs + return s.selinuxfs +} + +func verifySELinuxfsMount(mnt string) bool { + var buf syscall.Statfs_t + for { + err := syscall.Statfs(mnt, &buf) + if err == nil { + break + } + if err == syscall.EAGAIN { + continue + } + return false + } + if uint32(buf.Type) != uint32(selinuxfsMagic) { + return false + } + if (buf.Flags & stRdOnly) != 0 { + return false + } + + return true +} + +func findSELinuxfs() string { + // fast path: check the default mount first + if verifySELinuxfsMount(selinuxfsMount) { + return selinuxfsMount + } + + // check if selinuxfs is available before going the slow path + fs, err := ioutil.ReadFile("/proc/filesystems") + if err != nil { + return "" + } + if !bytes.Contains(fs, []byte("\tselinuxfs\n")) { + return "" + } + + // slow path: try to find among the mounts + f, err := os.Open("/proc/self/mountinfo") + if err != nil { + return "" + } + defer f.Close() + + scanner := bufio.NewScanner(f) + for { + mnt := findSELinuxfsMount(scanner) + if mnt == "" { // error or not found + return "" + } + if verifySELinuxfsMount(mnt) { + return mnt + } + } +} + +// findSELinuxfsMount returns a next selinuxfs mount point found, +// if there is one, or an empty string in case of EOF or error. +func findSELinuxfsMount(s *bufio.Scanner) string { + for s.Scan() { + txt := s.Text() + // The first field after - is fs type. + // Safe as spaces in mountpoints are encoded as \040 + if !strings.Contains(txt, " - selinuxfs ") { + continue + } + const mPos = 5 // mount point is 5th field + fields := strings.SplitN(txt, " ", mPos+1) + if len(fields) < mPos+1 { + continue + } + return fields[mPos-1] + } + + return "" +} + +func (s *selinuxState) getSELinuxfs() string { + s.Lock() + selinuxfs := s.selinuxfs + selinuxfsSet := s.selinuxfsSet + s.Unlock() + if selinuxfsSet { + return selinuxfs + } + + return s.setSELinuxfs(findSELinuxfs()) +} + +// getSelinuxMountPoint returns the path to the mountpoint of an selinuxfs +// filesystem or an empty string if no mountpoint is found. Selinuxfs is +// a proc-like pseudo-filesystem that exposes the selinux policy API to +// processes. The existence of an selinuxfs mount is used to determine +// whether selinux is currently enabled or not. +func getSelinuxMountPoint() string { + return state.getSELinuxfs() +} + +// GetEnabled returns whether selinux is currently enabled. 
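+//
+// A minimal caller-side sketch (hypothetical, assuming the package is
+// imported as "selinux"): consumers typically gate all labeling work on this
+// one check rather than probing /sys/fs/selinux themselves:
+//
+//   if selinux.GetEnabled() {
+//       label, _ := selinux.CurrentLabel()
+//       fmt.Println("current process label:", label)
+//   }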
+func GetEnabled() bool {
+ return state.getEnabled()
+}
+
+func readConfig(target string) string {
+ var (
+ val, key string
+ bufin *bufio.Reader
+ )
+
+ in, err := os.Open(selinuxConfig)
+ if err != nil {
+ return ""
+ }
+ defer in.Close()
+
+ bufin = bufio.NewReader(in)
+
+ for done := false; !done; {
+ var line string
+ if line, err = bufin.ReadString('\n'); err != nil {
+ if err != io.EOF {
+ return ""
+ }
+ done = true
+ }
+ line = strings.TrimSpace(line)
+ if len(line) == 0 {
+ // Skip blank lines
+ continue
+ }
+ if line[0] == ';' || line[0] == '#' {
+ // Skip comments
+ continue
+ }
+ if groups := assignRegex.FindStringSubmatch(line); groups != nil {
+ key, val = strings.TrimSpace(groups[1]), strings.TrimSpace(groups[2])
+ if key == target {
+ return strings.Trim(val, "\"")
+ }
+ }
+ }
+ return ""
+}
+
+func getSELinuxPolicyRoot() string {
+ return filepath.Join(selinuxDir, readConfig(selinuxTypeTag))
+}
+
+func readCon(fpath string) (string, error) {
+ if fpath == "" {
+ return "", ErrEmptyPath
+ }
+
+ in, err := os.Open(fpath)
+ if err != nil {
+ return "", err
+ }
+ defer in.Close()
+
+ var retval string
+ if _, err := fmt.Fscanf(in, "%s", &retval); err != nil {
+ return "", err
+ }
+ return strings.Trim(retval, "\x00"), nil
+}
+
+// SetFileLabel sets the SELinux label for this path or returns an error.
+func SetFileLabel(fpath string, label string) error {
+ if fpath == "" {
+ return ErrEmptyPath
+ }
+ return lsetxattr(fpath, xattrNameSelinux, []byte(label), 0)
+}
+
+// FileLabel returns the SELinux label for this path or returns an error.
+func FileLabel(fpath string) (string, error) {
+ if fpath == "" {
+ return "", ErrEmptyPath
+ }
+
+ label, err := lgetxattr(fpath, xattrNameSelinux)
+ if err != nil {
+ return "", err
+ }
+ // Trim the NUL byte at the end of the byte buffer, if present.
+ if len(label) > 0 && label[len(label)-1] == '\x00' {
+ label = label[:len(label)-1]
+ }
+ return string(label), nil
+}
+
+/*
+SetFSCreateLabel tells the kernel the label to use for all file system objects
+created by this task. Setting label="" returns to the default.
+*/
+func SetFSCreateLabel(label string) error {
+ return writeCon(fmt.Sprintf("/proc/self/task/%d/attr/fscreate", syscall.Gettid()), label)
+}
+
+/*
+FSCreateLabel returns the default label which the kernel is using for file
+system objects created by this task. "" indicates the default.
+*/
+func FSCreateLabel() (string, error) {
+ return readCon(fmt.Sprintf("/proc/self/task/%d/attr/fscreate", syscall.Gettid()))
+}
+
+// CurrentLabel returns the SELinux label of the current process thread, or an error.
+func CurrentLabel() (string, error) {
+ return readCon(fmt.Sprintf("/proc/self/task/%d/attr/current", syscall.Gettid()))
+}
+
+// PidLabel returns the SELinux label of the given pid, or an error.
+func PidLabel(pid int) (string, error) {
+ return readCon(fmt.Sprintf("/proc/%d/attr/current", pid))
+}
+
+/*
+ExecLabel returns the SELinux label that the kernel will use for any programs
+that are executed by the current process thread, or an error.
+*/ +func ExecLabel() (string, error) { + return readCon(fmt.Sprintf("/proc/self/task/%d/attr/exec", syscall.Gettid())) +} + +func writeCon(fpath string, val string) error { + if fpath == "" { + return ErrEmptyPath + } + + out, err := os.OpenFile(fpath, os.O_WRONLY, 0) + if err != nil { + return err + } + defer out.Close() + + if val != "" { + _, err = out.Write([]byte(val)) + } else { + _, err = out.Write(nil) + } + return err +} + +/* +CanonicalizeContext takes a context string and writes it to the kernel +the function then returns the context that the kernel will use. This function +can be used to see if two contexts are equivalent +*/ +func CanonicalizeContext(val string) (string, error) { + return readWriteCon(filepath.Join(getSelinuxMountPoint(), "context"), val) +} + +func readWriteCon(fpath string, val string) (string, error) { + if fpath == "" { + return "", ErrEmptyPath + } + f, err := os.OpenFile(fpath, os.O_RDWR, 0) + if err != nil { + return "", err + } + defer f.Close() + + _, err = f.Write([]byte(val)) + if err != nil { + return "", err + } + + var retval string + if _, err := fmt.Fscanf(f, "%s", &retval); err != nil { + return "", err + } + return strings.Trim(retval, "\x00"), nil +} + +/* +SetExecLabel sets the SELinux label that the kernel will use for any programs +that are executed by the current process thread, or an error. +*/ +func SetExecLabel(label string) error { + return writeCon(fmt.Sprintf("/proc/self/task/%d/attr/exec", syscall.Gettid()), label) +} + +// SetSocketLabel takes a process label and tells the kernel to assign the +// label to the next socket that gets created +func SetSocketLabel(label string) error { + return writeCon(fmt.Sprintf("/proc/self/task/%d/attr/sockcreate", syscall.Gettid()), label) +} + +// SocketLabel retrieves the current socket label setting +func SocketLabel() (string, error) { + return readCon(fmt.Sprintf("/proc/self/task/%d/attr/sockcreate", syscall.Gettid())) +} + +// Get returns the Context as a string +func (c Context) Get() string { + if c["level"] != "" { + return fmt.Sprintf("%s:%s:%s:%s", c["user"], c["role"], c["type"], c["level"]) + } + return fmt.Sprintf("%s:%s:%s", c["user"], c["role"], c["type"]) +} + +// NewContext creates a new Context struct from the specified label +func NewContext(label string) (Context, error) { + c := make(Context) + + if len(label) != 0 { + con := strings.SplitN(label, ":", 4) + if len(con) < 3 { + return c, InvalidLabel + } + c["user"] = con[0] + c["role"] = con[1] + c["type"] = con[2] + if len(con) > 3 { + c["level"] = con[3] + } + } + return c, nil +} + +// ClearLabels clears all reserved labels +func ClearLabels() { + state.Lock() + state.mcsList = make(map[string]bool) + state.Unlock() +} + +// ReserveLabel reserves the MLS/MCS level component of the specified label +func ReserveLabel(label string) { + if len(label) != 0 { + con := strings.SplitN(label, ":", 4) + if len(con) > 3 { + mcsAdd(con[3]) + } + } +} + +func selinuxEnforcePath() string { + return fmt.Sprintf("%s/enforce", getSelinuxMountPoint()) +} + +// EnforceMode returns the current SELinux mode Enforcing, Permissive, Disabled +func EnforceMode() int { + var enforce int + + enforceS, err := readCon(selinuxEnforcePath()) + if err != nil { + return -1 + } + + enforce, err = strconv.Atoi(string(enforceS)) + if err != nil { + return -1 + } + return enforce +} + +/* +SetEnforceMode sets the current SELinux mode Enforcing, Permissive. +Disabled is not valid, since this needs to be set at boot time. 
+*/
+func SetEnforceMode(mode int) error {
+ return writeCon(selinuxEnforcePath(), fmt.Sprintf("%d", mode))
+}
+
+/*
+DefaultEnforceMode returns the system's default SELinux mode Enforcing,
+Permissive or Disabled. Note this is just the default at boot time.
+EnforceMode tells you the system's current mode.
+*/
+func DefaultEnforceMode() int {
+ switch readConfig(selinuxTag) {
+ case "enforcing":
+ return Enforcing
+ case "permissive":
+ return Permissive
+ }
+ return Disabled
+}
+
+func mcsAdd(mcs string) error {
+ if mcs == "" {
+ return nil
+ }
+ state.Lock()
+ defer state.Unlock()
+ if state.mcsList[mcs] {
+ return ErrMCSAlreadyExists
+ }
+ state.mcsList[mcs] = true
+ return nil
+}
+
+func mcsDelete(mcs string) {
+ if mcs == "" {
+ return
+ }
+ state.Lock()
+ defer state.Unlock()
+ state.mcsList[mcs] = false
+}
+
+func intToMcs(id int, catRange uint32) string {
+ var (
+ SETSIZE = int(catRange)
+ TIER = SETSIZE
+ ORD = id
+ )
+
+ if id < 1 || id > 523776 {
+ return ""
+ }
+
+ for ORD > TIER {
+ ORD = ORD - TIER
+ TIER--
+ }
+ TIER = SETSIZE - TIER
+ ORD = ORD + TIER
+ return fmt.Sprintf("s0:c%d,c%d", TIER, ORD)
+}
+
+func uniqMcs(catRange uint32) string {
+ var (
+ n uint32
+ c1, c2 uint32
+ mcs string
+ )
+
+ for {
+ binary.Read(rand.Reader, binary.LittleEndian, &n)
+ c1 = n % catRange
+ binary.Read(rand.Reader, binary.LittleEndian, &n)
+ c2 = n % catRange
+ if c1 == c2 {
+ continue
+ } else {
+ if c1 > c2 {
+ c1, c2 = c2, c1
+ }
+ }
+ mcs = fmt.Sprintf("s0:c%d,c%d", c1, c2)
+ if err := mcsAdd(mcs); err != nil {
+ continue
+ }
+ break
+ }
+ return mcs
+}
+
+/*
+ReleaseLabel will unreserve the MLS/MCS Level field of the specified label,
+allowing it to be used by another process.
+*/
+func ReleaseLabel(label string) {
+ if len(label) != 0 {
+ con := strings.SplitN(label, ":", 4)
+ if len(con) > 3 {
+ mcsDelete(con[3])
+ }
+ }
+}
+
+// ROFileLabel returns the specified SELinux readonly file label
+func ROFileLabel() string {
+ return roFileLabel
+}
+
+/*
+ContainerLabels returns an allocated processLabel and fileLabel to be used for
+container labeling by the calling process.
+*/
+func ContainerLabels() (processLabel string, fileLabel string) {
+ var (
+ val, key string
+ bufin *bufio.Reader
+ )
+
+ if !GetEnabled() {
+ return "", ""
+ }
+ lxcPath := fmt.Sprintf("%s/contexts/lxc_contexts", getSELinuxPolicyRoot())
+ in, err := os.Open(lxcPath)
+ if err != nil {
+ return "", ""
+ }
+ defer in.Close()
+
+ bufin = bufio.NewReader(in)
+
+ for done := false; !done; {
+ var line string
+ if line, err = bufin.ReadString('\n'); err != nil {
+ if err == io.EOF {
+ done = true
+ } else {
+ goto exit
+ }
+ }
+ line = strings.TrimSpace(line)
+ if len(line) == 0 {
+ // Skip blank lines
+ continue
+ }
+ if line[0] == ';' || line[0] == '#' {
+ // Skip comments
+ continue
+ }
+ if groups := assignRegex.FindStringSubmatch(line); groups != nil {
+ key, val = strings.TrimSpace(groups[1]), strings.TrimSpace(groups[2])
+ if key == "process" {
+ processLabel = strings.Trim(val, "\"")
+ }
+ if key == "file" {
+ fileLabel = strings.Trim(val, "\"")
+ }
+ if key == "ro_file" {
+ roFileLabel = strings.Trim(val, "\"")
+ }
+ }
+ }
+
+ if processLabel == "" || fileLabel == "" {
+ return "", ""
+ }
+
+ if roFileLabel == "" {
+ roFileLabel = fileLabel
+ }
+exit:
+ scon, _ := NewContext(processLabel)
+ if scon["level"] != "" {
+ mcs := uniqMcs(1024)
+ scon["level"] = mcs
+ processLabel = scon.Get()
+ scon, _ = NewContext(fileLabel)
+ scon["level"] = mcs
+ fileLabel = scon.Get()
+ }
+ return processLabel, fileLabel
+}
+
+// SecurityCheckContext validates that the SELinux label is understood by the kernel
+func SecurityCheckContext(val string) error {
+ return writeCon(fmt.Sprintf("%s/context", getSelinuxMountPoint()), val)
+}
+
+/*
+CopyLevel returns a label with the MLS/MCS level from src label replaced on
+the dest label.
+*/
+func CopyLevel(src, dest string) (string, error) {
+ if src == "" {
+ return "", nil
+ }
+ if err := SecurityCheckContext(src); err != nil {
+ return "", err
+ }
+ if err := SecurityCheckContext(dest); err != nil {
+ return "", err
+ }
+ scon, err := NewContext(src)
+ if err != nil {
+ return "", err
+ }
+ tcon, err := NewContext(dest)
+ if err != nil {
+ return "", err
+ }
+ mcsDelete(tcon["level"])
+ mcsAdd(scon["level"])
+ tcon["level"] = scon["level"]
+ return tcon.Get(), nil
+}
+
+// Prevent users from relabeling system files
+func badPrefix(fpath string) error {
+ if fpath == "" {
+ return ErrEmptyPath
+ }
+
+ badPrefixes := []string{"/usr"}
+ for _, prefix := range badPrefixes {
+ if strings.HasPrefix(fpath, prefix) {
+ return fmt.Errorf("relabeling content in %s is not allowed", prefix)
+ }
+ }
+ return nil
+}
+
+// Chcon changes the `fpath` file object to the SELinux label `label`.
+// If `fpath` is a directory and `recurse` is true, Chcon will walk the
+// directory tree setting the label.
+func Chcon(fpath string, label string, recurse bool) error {
+ if fpath == "" {
+ return ErrEmptyPath
+ }
+ if label == "" {
+ return nil
+ }
+ if err := badPrefix(fpath); err != nil {
+ return err
+ }
+ callback := func(p string, info os.FileInfo, err error) error {
+ e := SetFileLabel(p, label)
+ if os.IsNotExist(e) {
+ return nil
+ }
+ return e
+ }
+
+ if recurse {
+ return filepath.Walk(fpath, callback)
+ }
+
+ return SetFileLabel(fpath, label)
+}
+
+// DupSecOpt takes an SELinux process label and returns security options that
+// can be used to set the SELinux Type and Level for future container processes.
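+//
+// A short sketch of the intended use (the label value here is made up):
+//
+//   opts, _ := DupSecOpt("system_u:system_r:svirt_lxc_net_t:s0:c1,c2")
+//   // opts == []string{"user:system_u", "role:system_r",
+//   //          "type:svirt_lxc_net_t", "level:s0:c1,c2"}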
+func DupSecOpt(src string) ([]string, error) { + if src == "" { + return nil, nil + } + con, err := NewContext(src) + if err != nil { + return nil, err + } + if con["user"] == "" || + con["role"] == "" || + con["type"] == "" { + return nil, nil + } + dup := []string{"user:" + con["user"], + "role:" + con["role"], + "type:" + con["type"], + } + + if con["level"] != "" { + dup = append(dup, "level:"+con["level"]) + } + + return dup, nil +} + +// DisableSecOpt returns a security opt that can be used to disable SELinux +// labeling support for future container processes. +func DisableSecOpt() []string { + return []string{"disable"} +} diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go new file mode 100644 index 0000000..9497acb --- /dev/null +++ b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go @@ -0,0 +1,206 @@ +// +build !selinux + +package selinux + +import ( + "errors" +) + +const ( + // Enforcing constant indicate SELinux is in enforcing mode + Enforcing = 1 + // Permissive constant to indicate SELinux is in permissive mode + Permissive = 0 + // Disabled constant to indicate SELinux is disabled + Disabled = -1 +) + +var ( + // ErrMCSAlreadyExists is returned when trying to allocate a duplicate MCS. + ErrMCSAlreadyExists = errors.New("MCS label already exists") + // ErrEmptyPath is returned when an empty path has been specified. + ErrEmptyPath = errors.New("empty path") +) + +// Context is a representation of the SELinux label broken into 4 parts +type Context map[string]string + +// SetDisabled disables selinux support for the package +func SetDisabled() { + return +} + +// GetEnabled returns whether selinux is currently enabled. +func GetEnabled() bool { + return false +} + +// SetFileLabel sets the SELinux label for this path or returns an error. +func SetFileLabel(fpath string, label string) error { + return nil +} + +// FileLabel returns the SELinux label for this path or returns an error. +func FileLabel(fpath string) (string, error) { + return "", nil +} + +/* +SetFSCreateLabel tells kernel the label to create all file system objects +created by this task. Setting label="" to return to default. +*/ +func SetFSCreateLabel(label string) error { + return nil +} + +/* +FSCreateLabel returns the default label the kernel which the kernel is using +for file system objects created by this task. "" indicates default. +*/ +func FSCreateLabel() (string, error) { + return "", nil +} + +// CurrentLabel returns the SELinux label of the current process thread, or an error. +func CurrentLabel() (string, error) { + return "", nil +} + +// PidLabel returns the SELinux label of the given pid, or an error. +func PidLabel(pid int) (string, error) { + return "", nil +} + +/* +ExecLabel returns the SELinux label that the kernel will use for any programs +that are executed by the current process thread, or an error. +*/ +func ExecLabel() (string, error) { + return "", nil +} + +/* +CanonicalizeContext takes a context string and writes it to the kernel +the function then returns the context that the kernel will use. This function +can be used to see if two contexts are equivalent +*/ +func CanonicalizeContext(val string) (string, error) { + return "", nil +} + +/* +SetExecLabel sets the SELinux label that the kernel will use for any programs +that are executed by the current process thread, or an error. 
+*/
+func SetExecLabel(label string) error {
+ return nil
+}
+
+/*
+SetSocketLabel takes a process label and tells the kernel to assign the
+label to the next socket that gets created.
+*/
+func SetSocketLabel(label string) error {
+ return nil
+}
+
+// SocketLabel retrieves the current socket label setting
+func SocketLabel() (string, error) {
+ return "", nil
+}
+
+// Get returns the Context as a string
+func (c Context) Get() string {
+ return ""
+}
+
+// NewContext creates a new Context struct from the specified label
+func NewContext(label string) (Context, error) {
+ c := make(Context)
+ return c, nil
+}
+
+// ClearLabels clears all reserved MLS/MCS levels
+func ClearLabels() {
+ return
+}
+
+// ReserveLabel reserves the MLS/MCS level component of the specified label
+func ReserveLabel(label string) {
+ return
+}
+
+// EnforceMode returns the current SELinux mode Enforcing, Permissive, Disabled
+func EnforceMode() int {
+ return Disabled
+}
+
+/*
+SetEnforceMode sets the current SELinux mode Enforcing, Permissive.
+Disabled is not valid, since this needs to be set at boot time.
+*/
+func SetEnforceMode(mode int) error {
+ return nil
+}
+
+/*
+DefaultEnforceMode returns the system's default SELinux mode Enforcing,
+Permissive or Disabled. Note this is just the default at boot time.
+EnforceMode tells you the system's current mode.
+*/
+func DefaultEnforceMode() int {
+ return Disabled
+}
+
+/*
+ReleaseLabel will unreserve the MLS/MCS Level field of the specified label,
+allowing it to be used by another process.
+*/
+func ReleaseLabel(label string) {
+ return
+}
+
+// ROFileLabel returns the specified SELinux readonly file label
+func ROFileLabel() string {
+ return ""
+}
+
+/*
+ContainerLabels returns an allocated processLabel and fileLabel to be used for
+container labeling by the calling process.
+*/
+func ContainerLabels() (processLabel string, fileLabel string) {
+ return "", ""
+}
+
+// SecurityCheckContext validates that the SELinux label is understood by the kernel
+func SecurityCheckContext(val string) error {
+ return nil
+}
+
+/*
+CopyLevel returns a label with the MLS/MCS level from src label replaced on
+the dest label.
+*/
+func CopyLevel(src, dest string) (string, error) {
+ return "", nil
+}
+
+// Chcon changes the `fpath` file object to the SELinux label `label`.
+// If `fpath` is a directory and `recurse` is true, Chcon will walk the
+// directory tree setting the label.
+func Chcon(fpath string, label string, recurse bool) error {
+ return nil
+}
+
+// DupSecOpt takes an SELinux process label and returns security options that
+// can be used to set the SELinux Type and Level for future container processes.
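+//
+// Which implementation of this API is compiled in is decided by the build
+// tags: selinux_linux.go is guarded by "selinux,linux", while this stub is
+// built under "!selinux". A consumer opts in with a build along the lines of:
+//
+//   go build -tags selinux ./...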
+func DisableSecOpt() []string { + return []string{"disable"} +} diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/xattrs.go b/vendor/github.com/opencontainers/selinux/go-selinux/xattrs.go new file mode 100644 index 0000000..67a9d8e --- /dev/null +++ b/vendor/github.com/opencontainers/selinux/go-selinux/xattrs.go @@ -0,0 +1,78 @@ +// +build selinux,linux + +package selinux + +import ( + "syscall" + "unsafe" +) + +var _zero uintptr + +// Returns a []byte slice if the xattr is set and nil otherwise +// Requires path and its attribute as arguments +func lgetxattr(path string, attr string) ([]byte, error) { + var sz int + pathBytes, err := syscall.BytePtrFromString(path) + if err != nil { + return nil, err + } + attrBytes, err := syscall.BytePtrFromString(attr) + if err != nil { + return nil, err + } + + // Start with a 128 length byte array + sz = 128 + dest := make([]byte, sz) + destBytes := unsafe.Pointer(&dest[0]) + _sz, _, errno := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0) + + switch { + case errno == syscall.ENODATA: + return nil, errno + case errno == syscall.ENOTSUP: + return nil, errno + case errno == syscall.ERANGE: + // 128 byte array might just not be good enough, + // A dummy buffer is used ``uintptr(0)`` to get real size + // of the xattrs on disk + _sz, _, errno = syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(unsafe.Pointer(nil)), uintptr(0), 0, 0) + sz = int(_sz) + if sz < 0 { + return nil, errno + } + dest = make([]byte, sz) + destBytes := unsafe.Pointer(&dest[0]) + _sz, _, errno = syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0) + if errno != 0 { + return nil, errno + } + case errno != 0: + return nil, errno + } + sz = int(_sz) + return dest[:sz], nil +} + +func lsetxattr(path string, attr string, data []byte, flags int) error { + pathBytes, err := syscall.BytePtrFromString(path) + if err != nil { + return err + } + attrBytes, err := syscall.BytePtrFromString(attr) + if err != nil { + return err + } + var dataBytes unsafe.Pointer + if len(data) > 0 { + dataBytes = unsafe.Pointer(&data[0]) + } else { + dataBytes = unsafe.Pointer(&_zero) + } + _, _, errno := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(dataBytes), uintptr(len(data)), uintptr(flags), 0) + if errno != 0 { + return errno + } + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/symlink/LICENSE.BSD b/vendor/github.com/pborman/uuid/LICENSE similarity index 95% rename from vendor/github.com/docker/docker/pkg/symlink/LICENSE.BSD rename to vendor/github.com/pborman/uuid/LICENSE index 9b4f4a2..5dc6826 100644 --- a/vendor/github.com/docker/docker/pkg/symlink/LICENSE.BSD +++ b/vendor/github.com/pborman/uuid/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2014-2016 The Docker & Go Authors. All rights reserved. +Copyright (c) 2009,2014 Google Inc. All rights reserved. 
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
diff --git a/vendor/github.com/pborman/uuid/README.md b/vendor/github.com/pborman/uuid/README.md
new file mode 100644
index 0000000..f023d47
--- /dev/null
+++ b/vendor/github.com/pborman/uuid/README.md
@@ -0,0 +1,13 @@
+This project was automatically exported from code.google.com/p/go-uuid
+
+# uuid ![build status](https://travis-ci.org/pborman/uuid.svg?branch=master)
+The uuid package generates and inspects UUIDs based on [RFC 4122](http://tools.ietf.org/html/rfc4122) and DCE 1.1: Authentication and Security Services.
+
+###### Install
+`go get github.com/pborman/uuid`
+
+###### Documentation
+[![GoDoc](https://godoc.org/github.com/pborman/uuid?status.svg)](http://godoc.org/github.com/pborman/uuid)
+
+Full `go doc` style documentation for the package can be viewed online without installing this package by using the GoDoc site here:
+http://godoc.org/github.com/pborman/uuid
diff --git a/vendor/github.com/pborman/uuid/dce.go b/vendor/github.com/pborman/uuid/dce.go
new file mode 100755
index 0000000..50a0f2d
--- /dev/null
+++ b/vendor/github.com/pborman/uuid/dce.go
@@ -0,0 +1,84 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "encoding/binary"
+ "fmt"
+ "os"
+)
+
+// A Domain represents a Version 2 domain
+type Domain byte
+
+// Domain constants for DCE Security (Version 2) UUIDs.
+const (
+ Person = Domain(0)
+ Group = Domain(1)
+ Org = Domain(2)
+)
+
+// NewDCESecurity returns a DCE Security (Version 2) UUID.
+//
+// The domain should be one of Person, Group or Org.
+// On a POSIX system the id should be the user's UID for the Person
+// domain and the user's GID for the Group. The meaning of id for
+// the domain Org or on non-POSIX systems is site defined.
+//
+// For a given domain/id pair the same token may be returned for up to
+// 7 minutes and 10 seconds.
+func NewDCESecurity(domain Domain, id uint32) UUID {
+ uuid := NewUUID()
+ if uuid != nil {
+ uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2
+ uuid[9] = byte(domain)
+ binary.BigEndian.PutUint32(uuid[0:], id)
+ }
+ return uuid
+}
+
+// NewDCEPerson returns a DCE Security (Version 2) UUID in the person
+// domain with the id returned by os.Getuid.
+//
+// NewDCEPerson(Person, uint32(os.Getuid()))
+func NewDCEPerson() UUID {
+ return NewDCESecurity(Person, uint32(os.Getuid()))
+}
+
+// NewDCEGroup returns a DCE Security (Version 2) UUID in the group
+// domain with the id returned by os.Getgid.
+//
+// NewDCEGroup(Group, uint32(os.Getgid()))
+func NewDCEGroup() UUID {
+ return NewDCESecurity(Group, uint32(os.Getgid()))
+}
+
+// Domain returns the domain for a Version 2 UUID or false.
+func (uuid UUID) Domain() (Domain, bool) {
+ if v, _ := uuid.Version(); v != 2 {
+ return 0, false
+ }
+ return Domain(uuid[9]), true
+}
+
+// Id returns the id for a Version 2 UUID or false.
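+//
+// A small sketch (values illustrative) of round-tripping the embedded
+// domain and id:
+//
+//   u := NewDCESecurity(Person, 501)
+//   d, ok := u.Domain() // Person, true
+//   id, ok2 := u.Id()   // 501, true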
+func (uuid UUID) Id() (uint32, bool) { + if v, _ := uuid.Version(); v != 2 { + return 0, false + } + return binary.BigEndian.Uint32(uuid[0:4]), true +} + +func (d Domain) String() string { + switch d { + case Person: + return "Person" + case Group: + return "Group" + case Org: + return "Org" + } + return fmt.Sprintf("Domain%d", int(d)) +} diff --git a/vendor/github.com/pborman/uuid/doc.go b/vendor/github.com/pborman/uuid/doc.go new file mode 100755 index 0000000..d8bd013 --- /dev/null +++ b/vendor/github.com/pborman/uuid/doc.go @@ -0,0 +1,8 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// The uuid package generates and inspects UUIDs. +// +// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security Services. +package uuid diff --git a/vendor/github.com/pborman/uuid/hash.go b/vendor/github.com/pborman/uuid/hash.go new file mode 100644 index 0000000..a0420c1 --- /dev/null +++ b/vendor/github.com/pborman/uuid/hash.go @@ -0,0 +1,53 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "crypto/md5" + "crypto/sha1" + "hash" +) + +// Well known Name Space IDs and UUIDs +var ( + NameSpace_DNS = Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8") + NameSpace_URL = Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8") + NameSpace_OID = Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8") + NameSpace_X500 = Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8") + NIL = Parse("00000000-0000-0000-0000-000000000000") +) + +// NewHash returns a new UUID derived from the hash of space concatenated with +// data generated by h. The hash should be at least 16 byte in length. The +// first 16 bytes of the hash are used to form the UUID. The version of the +// UUID will be the lower 4 bits of version. NewHash is used to implement +// NewMD5 and NewSHA1. +func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID { + h.Reset() + h.Write(space) + h.Write([]byte(data)) + s := h.Sum(nil) + uuid := make([]byte, 16) + copy(uuid, s) + uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4) + uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant + return uuid +} + +// NewMD5 returns a new MD5 (Version 3) UUID based on the +// supplied name space and data. +// +// NewHash(md5.New(), space, data, 3) +func NewMD5(space UUID, data []byte) UUID { + return NewHash(md5.New(), space, data, 3) +} + +// NewSHA1 returns a new SHA1 (Version 5) UUID based on the +// supplied name space and data. +// +// NewHash(sha1.New(), space, data, 5) +func NewSHA1(space UUID, data []byte) UUID { + return NewHash(sha1.New(), space, data, 5) +} diff --git a/vendor/github.com/pborman/uuid/json.go b/vendor/github.com/pborman/uuid/json.go new file mode 100644 index 0000000..9dda1df --- /dev/null +++ b/vendor/github.com/pborman/uuid/json.go @@ -0,0 +1,34 @@ +// Copyright 2014 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
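+
+// Note on the name-based constructors in hash.go above: they are
+// deterministic, so the same namespace/name pair always yields the same
+// UUID. A sketch:
+//
+//   u := NewSHA1(NameSpace_DNS, []byte("example.com")) // stable v5 UUID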
+ +package uuid + +import "errors" + +func (u UUID) MarshalJSON() ([]byte, error) { + if len(u) != 16 { + return []byte(`""`), nil + } + var js [38]byte + js[0] = '"' + encodeHex(js[1:], u) + js[37] = '"' + return js[:], nil +} + +func (u *UUID) UnmarshalJSON(data []byte) error { + if string(data) == `""` { + return nil + } + if data[0] != '"' { + return errors.New("invalid UUID format") + } + data = data[1 : len(data)-1] + uu := Parse(string(data)) + if uu == nil { + return errors.New("invalid UUID format") + } + *u = uu + return nil +} diff --git a/vendor/github.com/pborman/uuid/node.go b/vendor/github.com/pborman/uuid/node.go new file mode 100755 index 0000000..42d60da --- /dev/null +++ b/vendor/github.com/pborman/uuid/node.go @@ -0,0 +1,117 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "net" + "sync" +) + +var ( + nodeMu sync.Mutex + interfaces []net.Interface // cached list of interfaces + ifname string // name of interface being used + nodeID []byte // hardware for version 1 UUIDs +) + +// NodeInterface returns the name of the interface from which the NodeID was +// derived. The interface "user" is returned if the NodeID was set by +// SetNodeID. +func NodeInterface() string { + defer nodeMu.Unlock() + nodeMu.Lock() + return ifname +} + +// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs. +// If name is "" then the first usable interface found will be used or a random +// Node ID will be generated. If a named interface cannot be found then false +// is returned. +// +// SetNodeInterface never fails when name is "". +func SetNodeInterface(name string) bool { + defer nodeMu.Unlock() + nodeMu.Lock() + return setNodeInterface(name) +} + +func setNodeInterface(name string) bool { + if interfaces == nil { + var err error + interfaces, err = net.Interfaces() + if err != nil && name != "" { + return false + } + } + + for _, ifs := range interfaces { + if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) { + if setNodeID(ifs.HardwareAddr) { + ifname = ifs.Name + return true + } + } + } + + // We found no interfaces with a valid hardware address. If name + // does not specify a specific interface generate a random Node ID + // (section 4.1.6) + if name == "" { + if nodeID == nil { + nodeID = make([]byte, 6) + } + randomBits(nodeID) + return true + } + return false +} + +// NodeID returns a slice of a copy of the current Node ID, setting the Node ID +// if not already set. +func NodeID() []byte { + defer nodeMu.Unlock() + nodeMu.Lock() + if nodeID == nil { + setNodeInterface("") + } + nid := make([]byte, 6) + copy(nid, nodeID) + return nid +} + +// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes +// of id are used. If id is less than 6 bytes then false is returned and the +// Node ID is not set. +func SetNodeID(id []byte) bool { + defer nodeMu.Unlock() + nodeMu.Lock() + if setNodeID(id) { + ifname = "user" + return true + } + return false +} + +func setNodeID(id []byte) bool { + if len(id) < 6 { + return false + } + if nodeID == nil { + nodeID = make([]byte, 6) + } + copy(nodeID, id) + return true +} + +// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is +// not valid. The NodeID is only well defined for version 1 and 2 UUIDs. 
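+//
+// Typical node-ID control, sketched (the interface name "eth0" is
+// hypothetical):
+//
+//   if !SetNodeInterface("eth0") {
+//       SetNodeInterface("") // any usable interface, else a random node ID
+//   }
+//   id := NodeID() // 6-byte copy of the current node ID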
+func (uuid UUID) NodeID() []byte {
+ if len(uuid) != 16 {
+ return nil
+ }
+ node := make([]byte, 6)
+ copy(node, uuid[10:])
+ return node
+}
diff --git a/vendor/github.com/pborman/uuid/sql.go b/vendor/github.com/pborman/uuid/sql.go
new file mode 100644
index 0000000..c84f900
--- /dev/null
+++ b/vendor/github.com/pborman/uuid/sql.go
@@ -0,0 +1,58 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "errors"
+ "fmt"
+)
+
+// Scan implements sql.Scanner so UUIDs can be read from databases transparently.
+// Currently, database types that map to string and []byte are supported. Please
+// consult database-specific driver documentation for matching types.
+func (uuid *UUID) Scan(src interface{}) error {
+ switch src.(type) {
+ case string:
+ // if an empty UUID comes from a table, we return a null UUID
+ if src.(string) == "" {
+ return nil
+ }
+
+ // see uuid.Parse for required string format
+ parsed := Parse(src.(string))
+
+ if parsed == nil {
+ return errors.New("Scan: invalid UUID format")
+ }
+
+ *uuid = parsed
+ case []byte:
+ b := src.([]byte)
+
+ // if an empty UUID comes from a table, we return a null UUID
+ if len(b) == 0 {
+ return nil
+ }
+
+ // assumes a simple slice of bytes if 16 bytes
+ // otherwise attempts to parse
+ if len(b) == 16 {
+ *uuid = UUID(b)
+ } else {
+ u := Parse(string(b))
+
+ if u == nil {
+ return errors.New("Scan: invalid UUID format")
+ }
+
+ *uuid = u
+ }
+
+ default:
+ return fmt.Errorf("Scan: unable to scan type %T into UUID", src)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/pborman/uuid/time.go b/vendor/github.com/pborman/uuid/time.go
new file mode 100755
index 0000000..eedf242
--- /dev/null
+++ b/vendor/github.com/pborman/uuid/time.go
@@ -0,0 +1,132 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "encoding/binary"
+ "sync"
+ "time"
+)
+
+// A Time represents a time as the number of 100's of nanoseconds since 15 Oct
+// 1582.
+type Time int64
+
+const (
+ lillian = 2299160 // Julian day of 15 Oct 1582
+ unix = 2440587 // Julian day of 1 Jan 1970
+ epoch = unix - lillian // Days between epochs
+ g1582 = epoch * 86400 // seconds between epochs
+ g1582ns100 = g1582 * 10000000 // 100s of nanoseconds between epochs
+)
+
+var (
+ timeMu sync.Mutex
+ lasttime uint64 // last time we returned
+ clock_seq uint16 // clock sequence for this run
+
+ timeNow = time.Now // for testing
+)
+
+// UnixTime converts t to the number of seconds and nanoseconds since the Unix
+// epoch of 1 Jan 1970.
+func (t Time) UnixTime() (sec, nsec int64) {
+ sec = int64(t - g1582ns100)
+ nsec = (sec % 10000000) * 100
+ sec /= 10000000
+ return sec, nsec
+}
+
+// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and
+// clock sequence as well as adjusting the clock sequence as needed. An error
+// is returned if the current time cannot be determined.
+func GetTime() (Time, uint16, error) {
+ defer timeMu.Unlock()
+ timeMu.Lock()
+ return getTime()
+}
+
+func getTime() (Time, uint16, error) {
+ t := timeNow()
+
+ // If we don't have a clock sequence already, set one.
+ if clock_seq == 0 {
+ setClockSequence(-1)
+ }
+ now := uint64(t.UnixNano()/100) + g1582ns100
+
+ // If time has gone backwards with this clock sequence then we
+ // increment the clock sequence
+ if now <= lasttime {
+ clock_seq = ((clock_seq + 1) & 0x3fff) | 0x8000
+ }
+ lasttime = now
+ return Time(now), clock_seq, nil
+}
+
+// ClockSequence returns the current clock sequence, generating one if not
+// already set. The clock sequence is only used for Version 1 UUIDs.
+//
+// The uuid package does not use global static storage for the clock sequence or
+// the last time a UUID was generated. Unless SetClockSequence is used, a new
+// random clock sequence is generated the first time a clock sequence is
+// requested by ClockSequence, GetTime, or NewUUID. (section 4.2.1.1)
+func ClockSequence() int {
+ defer timeMu.Unlock()
+ timeMu.Lock()
+ return clockSequence()
+}
+
+func clockSequence() int {
+ if clock_seq == 0 {
+ setClockSequence(-1)
+ }
+ return int(clock_seq & 0x3fff)
+}
+
+// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to
+// -1 causes a new sequence to be generated.
+func SetClockSequence(seq int) {
+ defer timeMu.Unlock()
+ timeMu.Lock()
+ setClockSequence(seq)
+}
+
+func setClockSequence(seq int) {
+ if seq == -1 {
+ var b [2]byte
+ randomBits(b[:]) // clock sequence
+ seq = int(b[0])<<8 | int(b[1])
+ }
+ old_seq := clock_seq
+ clock_seq = uint16(seq&0x3fff) | 0x8000 // Set our variant
+ if old_seq != clock_seq {
+ lasttime = 0
+ }
+}
+
+// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in
+// uuid. It returns false if uuid is not valid. The time is only well defined
+// for version 1 and 2 UUIDs.
+func (uuid UUID) Time() (Time, bool) {
+ if len(uuid) != 16 {
+ return 0, false
+ }
+ time := int64(binary.BigEndian.Uint32(uuid[0:4]))
+ time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
+ time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
+ return Time(time), true
+}
+
+// ClockSequence returns the clock sequence encoded in uuid. It returns false
+// if uuid is not valid. The clock sequence is only well defined for version 1
+// and 2 UUIDs.
+func (uuid UUID) ClockSequence() (int, bool) {
+ if len(uuid) != 16 {
+ return 0, false
+ }
+ return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff, true
+}
diff --git a/vendor/github.com/pborman/uuid/util.go b/vendor/github.com/pborman/uuid/util.go
new file mode 100644
index 0000000..fc8e052
--- /dev/null
+++ b/vendor/github.com/pborman/uuid/util.go
@@ -0,0 +1,43 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "io"
+)
+
+// randomBits completely fills slice b with random data.
+func randomBits(b []byte) {
+ if _, err := io.ReadFull(rander, b); err != nil {
+ panic(err.Error()) // rand should never fail
+ }
+}
+
+// xvalues returns the value of a byte as a hexadecimal digit or 255.
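+// The table maps the ASCII digits '0'-'9' and the hex letters 'A'-'F' /
+// 'a'-'f' to their numeric values; every other byte maps to 255 so that
+// xtob (below) can reject non-hex input. A sketch:
+//
+//   b, ok := xtob("ff") // b == 0xff, ok == true
+//   _, ok = xtob("zz")  // ok == false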
+var xvalues = [256]byte{ + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255, + 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, +} + +// xtob converts the the first two hex bytes of x into a byte. +func xtob(x string) (byte, bool) { + b1 := xvalues[x[0]] + b2 := xvalues[x[1]] + return (b1 << 4) | b2, b1 != 255 && b2 != 255 +} diff --git a/vendor/github.com/pborman/uuid/uuid.go b/vendor/github.com/pborman/uuid/uuid.go new file mode 100644 index 0000000..c4482cd --- /dev/null +++ b/vendor/github.com/pborman/uuid/uuid.go @@ -0,0 +1,176 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "bytes" + "crypto/rand" + "encoding/hex" + "fmt" + "io" + "strings" +) + +// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC +// 4122. +type UUID []byte + +// A Version represents a UUIDs version. +type Version byte + +// A Variant represents a UUIDs variant. +type Variant byte + +// Constants returned by Variant. +const ( + Invalid = Variant(iota) // Invalid UUID + RFC4122 // The variant specified in RFC4122 + Reserved // Reserved, NCS backward compatibility. + Microsoft // Reserved, Microsoft Corporation backward compatibility. + Future // Reserved for future definition. +) + +var rander = rand.Reader // random function + +// New returns a new random (version 4) UUID as a string. It is a convenience +// function for NewRandom().String(). +func New() string { + return NewRandom().String() +} + +// Parse decodes s into a UUID or returns nil. Both the UUID form of +// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded. +func Parse(s string) UUID { + if len(s) == 36+9 { + if strings.ToLower(s[:9]) != "urn:uuid:" { + return nil + } + s = s[9:] + } else if len(s) != 36 { + return nil + } + if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { + return nil + } + var uuid [16]byte + for i, x := range [16]int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34} { + if v, ok := xtob(s[x:]); !ok { + return nil + } else { + uuid[i] = v + } + } + return uuid[:] +} + +// Equal returns true if uuid1 and uuid2 are equal. 
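+//
+// For context, Parse (above) accepts both the plain and the URN form; a
+// usage sketch:
+//
+//   u1 := Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
+//   u2 := Parse("urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8")
+//   Equal(u1, u2) // true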
+func Equal(uuid1, uuid2 UUID) bool {
+ return bytes.Equal(uuid1, uuid2)
+}
+
+// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx,
+// or "" if uuid is invalid.
+func (uuid UUID) String() string {
+ if len(uuid) != 16 {
+ return ""
+ }
+ var buf [36]byte
+ encodeHex(buf[:], uuid)
+ return string(buf[:])
+}
+
+// URN returns the RFC 2141 URN form of uuid,
+// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid.
+func (uuid UUID) URN() string {
+ if len(uuid) != 16 {
+ return ""
+ }
+ var buf [36 + 9]byte
+ copy(buf[:], "urn:uuid:")
+ encodeHex(buf[9:], uuid)
+ return string(buf[:])
+}
+
+func encodeHex(dst []byte, uuid UUID) {
+ hex.Encode(dst[:], uuid[:4])
+ dst[8] = '-'
+ hex.Encode(dst[9:13], uuid[4:6])
+ dst[13] = '-'
+ hex.Encode(dst[14:18], uuid[6:8])
+ dst[18] = '-'
+ hex.Encode(dst[19:23], uuid[8:10])
+ dst[23] = '-'
+ hex.Encode(dst[24:], uuid[10:])
+}
+
+// Variant returns the variant encoded in uuid. It returns Invalid if
+// uuid is invalid.
+func (uuid UUID) Variant() Variant {
+ if len(uuid) != 16 {
+ return Invalid
+ }
+ switch {
+ case (uuid[8] & 0xc0) == 0x80:
+ return RFC4122
+ case (uuid[8] & 0xe0) == 0xc0:
+ return Microsoft
+ case (uuid[8] & 0xe0) == 0xe0:
+ return Future
+ default:
+ return Reserved
+ }
+}
+
+// Version returns the version of uuid. It returns false if uuid is not
+// valid.
+func (uuid UUID) Version() (Version, bool) {
+ if len(uuid) != 16 {
+ return 0, false
+ }
+ return Version(uuid[6] >> 4), true
+}
+
+func (v Version) String() string {
+ if v > 15 {
+ return fmt.Sprintf("BAD_VERSION_%d", v)
+ }
+ return fmt.Sprintf("VERSION_%d", v)
+}
+
+func (v Variant) String() string {
+ switch v {
+ case RFC4122:
+ return "RFC4122"
+ case Reserved:
+ return "Reserved"
+ case Microsoft:
+ return "Microsoft"
+ case Future:
+ return "Future"
+ case Invalid:
+ return "Invalid"
+ }
+ return fmt.Sprintf("BadVariant%d", int(v))
+}
+
+// SetRand sets the random number generator to r, which implements io.Reader.
+// If r.Read returns an error when the package requests random data then
+// a panic will be issued.
+//
+// Calling SetRand with nil sets the random number generator to the default
+// generator.
+func SetRand(r io.Reader) {
+ if r == nil {
+ rander = rand.Reader
+ return
+ }
+ rander = r
+}
diff --git a/vendor/github.com/pborman/uuid/version1.go b/vendor/github.com/pborman/uuid/version1.go
new file mode 100644
index 0000000..0127eac
--- /dev/null
+++ b/vendor/github.com/pborman/uuid/version1.go
@@ -0,0 +1,41 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "encoding/binary"
+)
+
+// NewUUID returns a Version 1 UUID based on the current NodeID and clock
+// sequence, and the current time. If the NodeID has not been set by SetNodeID
+// or SetNodeInterface then it will be set automatically. If the NodeID cannot
+// be set NewUUID returns nil. If clock sequence has not been set by
+// SetClockSequence then it will be set automatically. If GetTime fails to
+// return the current time, NewUUID returns nil.
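+//
+// Byte-layout sketch, matching the packing below: time_low (4 bytes),
+// time_mid (2), time_hi_and_version (2, with 0x1000 OR-ed in for the
+// version), clock_seq (2, variant bits already set by GetTime), node (6).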
+func NewUUID() UUID {
+ if nodeID == nil {
+ SetNodeInterface("")
+ }
+
+ now, seq, err := GetTime()
+ if err != nil {
+ return nil
+ }
+
+ uuid := make([]byte, 16)
+
+ time_low := uint32(now & 0xffffffff)
+ time_mid := uint16((now >> 32) & 0xffff)
+ time_hi := uint16((now >> 48) & 0x0fff)
+ time_hi |= 0x1000 // Version 1
+
+ binary.BigEndian.PutUint32(uuid[0:], time_low)
+ binary.BigEndian.PutUint16(uuid[4:], time_mid)
+ binary.BigEndian.PutUint16(uuid[6:], time_hi)
+ binary.BigEndian.PutUint16(uuid[8:], seq)
+ copy(uuid[10:], nodeID)
+
+ return uuid
+}
diff --git a/vendor/github.com/pborman/uuid/version4.go b/vendor/github.com/pborman/uuid/version4.go
new file mode 100644
index 0000000..b3d4a36
--- /dev/null
+++ b/vendor/github.com/pborman/uuid/version4.go
@@ -0,0 +1,25 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+// NewRandom returns a Random (Version 4) UUID or panics.
+//
+// The strength of the UUIDs is based on the strength of the crypto/rand
+// package.
+//
+// A note about uniqueness derived from the UUID Wikipedia entry:
+//
+// Randomly generated UUIDs have 122 random bits. One's annual risk of being
+// hit by a meteorite is estimated to be one chance in 17 billion, that
+// means the probability is about 0.00000000006 (6 × 10⁻¹¹),
+// equivalent to the odds of creating a few tens of trillions of UUIDs in a
+// year and having one duplicate.
+func NewRandom() UUID {
+ uuid := make([]byte, 16)
+ randomBits([]byte(uuid))
+ uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
+ uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
+ return uuid
+}
diff --git a/vendor/github.com/pquerna/ffjson/fflib/v1/buffer.go b/vendor/github.com/pquerna/ffjson/fflib/v1/buffer.go
new file mode 100644
index 0000000..7f63a85
--- /dev/null
+++ b/vendor/github.com/pquerna/ffjson/fflib/v1/buffer.go
@@ -0,0 +1,421 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package v1
+
+// Simple byte buffer for marshaling data.
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "io"
+ "unicode/utf8"
+)
+
+type grower interface {
+ Grow(n int)
+}
+
+type truncater interface {
+ Truncate(n int)
+ Reset()
+}
+
+type bytesReader interface {
+ Bytes() []byte
+ String() string
+}
+
+type runeWriter interface {
+ WriteRune(r rune) (n int, err error)
+}
+
+type stringWriter interface {
+ WriteString(s string) (n int, err error)
+}
+
+type lener interface {
+ Len() int
+}
+
+type rewinder interface {
+ Rewind(n int) (err error)
+}
+
+type encoder interface {
+ Encode(interface{}) error
+}
+
+// TODO(pquerna): continue to reduce these interfaces
+
+type EncodingBuffer interface {
+ io.Writer
+ io.WriterTo
+ io.ByteWriter
+ stringWriter
+ truncater
+ grower
+ rewinder
+ encoder
+}
+
+type DecodingBuffer interface {
+ io.ReadWriter
+ io.ByteWriter
+ stringWriter
+ runeWriter
+ truncater
+ grower
+ bytesReader
+ lener
+}
+
+// A Buffer is a variable-sized buffer of bytes with Read and Write methods.
+// The zero value for Buffer is an empty buffer ready to use.
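+//
+// A minimal usage sketch (nothing assumed beyond this file; note that Encode
+// strips the trailing newline that encoding/json normally appends):
+//
+//   var b Buffer
+//   b.WriteString(`{"k":`)
+//   b.Encode(42)
+//   b.WriteByte('}')
+//   b.String() // {"k":42}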
+type Buffer struct { + buf []byte // contents are the bytes buf[off : len(buf)] + off int // read at &buf[off], write at &buf[len(buf)] + runeBytes [utf8.UTFMax]byte // avoid allocation of slice on each WriteByte or Rune + encoder *json.Encoder + skipTrailingByte bool +} + +// ErrTooLarge is passed to panic if memory cannot be allocated to store data in a buffer. +var ErrTooLarge = errors.New("fflib.v1.Buffer: too large") + +// Bytes returns a slice of the contents of the unread portion of the buffer; +// len(b.Bytes()) == b.Len(). If the caller changes the contents of the +// returned slice, the contents of the buffer will change provided there +// are no intervening method calls on the Buffer. +func (b *Buffer) Bytes() []byte { return b.buf[b.off:] } + +// String returns the contents of the unread portion of the buffer +// as a string. If the Buffer is a nil pointer, it returns "". +func (b *Buffer) String() string { + if b == nil { + // Special case, useful in debugging. + return "" + } + return string(b.buf[b.off:]) +} + +// Len returns the number of bytes of the unread portion of the buffer; +// b.Len() == len(b.Bytes()). +func (b *Buffer) Len() int { return len(b.buf) - b.off } + +// Truncate discards all but the first n unread bytes from the buffer. +// It panics if n is negative or greater than the length of the buffer. +func (b *Buffer) Truncate(n int) { + if n == 0 { + b.off = 0 + b.buf = b.buf[0:0] + } else { + b.buf = b.buf[0 : b.off+n] + } +} + +// Reset resets the buffer so it has no content. +// b.Reset() is the same as b.Truncate(0). +func (b *Buffer) Reset() { b.Truncate(0) } + +// grow grows the buffer to guarantee space for n more bytes. +// It returns the index where bytes should be written. +// If the buffer can't grow it will panic with ErrTooLarge. +func (b *Buffer) grow(n int) int { + // If we have no buffer, get one from the pool + m := b.Len() + if m == 0 { + if b.buf == nil { + b.buf = makeSlice(2 * n) + b.off = 0 + } else if b.off != 0 { + // If buffer is empty, reset to recover space. + b.Truncate(0) + } + } + if len(b.buf)+n > cap(b.buf) { + var buf []byte + if m+n <= cap(b.buf)/2 { + // We can slide things down instead of allocating a new + // slice. We only need m+n <= cap(b.buf) to slide, but + // we instead let capacity get twice as large so we + // don't spend all our time copying. + copy(b.buf[:], b.buf[b.off:]) + buf = b.buf[:m] + } else { + // not enough space anywhere + buf = makeSlice(2*cap(b.buf) + n) + copy(buf, b.buf[b.off:]) + Pool(b.buf) + b.buf = buf + } + b.off = 0 + } + b.buf = b.buf[0 : b.off+m+n] + return b.off + m +} + +// Grow grows the buffer's capacity, if necessary, to guarantee space for +// another n bytes. After Grow(n), at least n bytes can be written to the +// buffer without another allocation. +// If n is negative, Grow will panic. +// If the buffer can't grow it will panic with ErrTooLarge. +func (b *Buffer) Grow(n int) { + if n < 0 { + panic("bytes.Buffer.Grow: negative count") + } + m := b.grow(n) + b.buf = b.buf[0:m] +} + +// Write appends the contents of p to the buffer, growing the buffer as +// needed. The return value n is the length of p; err is always nil. If the +// buffer becomes too large, Write will panic with ErrTooLarge. +func (b *Buffer) Write(p []byte) (n int, err error) { + if b.skipTrailingByte { + p = p[:len(p)-1] + } + m := b.grow(len(p)) + return copy(b.buf[m:], p), nil +} + +// WriteString appends the contents of s to the buffer, growing the buffer as +// needed. 
The return value n is the length of s; err is always nil. If the +// buffer becomes too large, WriteString will panic with ErrTooLarge. +func (b *Buffer) WriteString(s string) (n int, err error) { + m := b.grow(len(s)) + return copy(b.buf[m:], s), nil +} + +// MinRead is the minimum slice size passed to a Read call by +// Buffer.ReadFrom. As long as the Buffer has at least MinRead bytes beyond +// what is required to hold the contents of r, ReadFrom will not grow the +// underlying buffer. +const minRead = 512 + +// ReadFrom reads data from r until EOF and appends it to the buffer, growing +// the buffer as needed. The return value n is the number of bytes read. Any +// error except io.EOF encountered during the read is also returned. If the +// buffer becomes too large, ReadFrom will panic with ErrTooLarge. +func (b *Buffer) ReadFrom(r io.Reader) (n int64, err error) { + // If buffer is empty, reset to recover space. + if b.off >= len(b.buf) { + b.Truncate(0) + } + for { + if free := cap(b.buf) - len(b.buf); free < minRead { + // not enough space at end + newBuf := b.buf + if b.off+free < minRead { + // not enough space using beginning of buffer; + // double buffer capacity + newBuf = makeSlice(2*cap(b.buf) + minRead) + } + copy(newBuf, b.buf[b.off:]) + Pool(b.buf) + b.buf = newBuf[:len(b.buf)-b.off] + b.off = 0 + } + m, e := r.Read(b.buf[len(b.buf):cap(b.buf)]) + b.buf = b.buf[0 : len(b.buf)+m] + n += int64(m) + if e == io.EOF { + break + } + if e != nil { + return n, e + } + } + return n, nil // err is EOF, so return nil explicitly +} + +// WriteTo writes data to w until the buffer is drained or an error occurs. +// The return value n is the number of bytes written; it always fits into an +// int, but it is int64 to match the io.WriterTo interface. Any error +// encountered during the write is also returned. +func (b *Buffer) WriteTo(w io.Writer) (n int64, err error) { + if b.off < len(b.buf) { + nBytes := b.Len() + m, e := w.Write(b.buf[b.off:]) + if m > nBytes { + panic("bytes.Buffer.WriteTo: invalid Write count") + } + b.off += m + n = int64(m) + if e != nil { + return n, e + } + // all bytes should have been written, by definition of + // Write method in io.Writer + if m != nBytes { + return n, io.ErrShortWrite + } + } + // Buffer is now empty; reset. + b.Truncate(0) + return +} + +// WriteByte appends the byte c to the buffer, growing the buffer as needed. +// The returned error is always nil, but is included to match bufio.Writer's +// WriteByte. If the buffer becomes too large, WriteByte will panic with +// ErrTooLarge. +func (b *Buffer) WriteByte(c byte) error { + m := b.grow(1) + b.buf[m] = c + return nil +} + +func (b *Buffer) Rewind(n int) error { + b.buf = b.buf[:len(b.buf)-n] + return nil +} + +func (b *Buffer) Encode(v interface{}) error { + if b.encoder == nil { + b.encoder = json.NewEncoder(b) + } + b.skipTrailingByte = true + err := b.encoder.Encode(v) + b.skipTrailingByte = false + return err +} + +// WriteRune appends the UTF-8 encoding of Unicode code point r to the +// buffer, returning its length and an error, which is always nil but is +// included to match bufio.Writer's WriteRune. The buffer is grown as needed; +// if it becomes too large, WriteRune will panic with ErrTooLarge. 
+func (b *Buffer) WriteRune(r rune) (n int, err error) { + if r < utf8.RuneSelf { + b.WriteByte(byte(r)) + return 1, nil + } + n = utf8.EncodeRune(b.runeBytes[0:], r) + b.Write(b.runeBytes[0:n]) + return n, nil +} + +// Read reads the next len(p) bytes from the buffer or until the buffer +// is drained. The return value n is the number of bytes read. If the +// buffer has no data to return, err is io.EOF (unless len(p) is zero); +// otherwise it is nil. +func (b *Buffer) Read(p []byte) (n int, err error) { + if b.off >= len(b.buf) { + // Buffer is empty, reset to recover space. + b.Truncate(0) + if len(p) == 0 { + return + } + return 0, io.EOF + } + n = copy(p, b.buf[b.off:]) + b.off += n + return +} + +// Next returns a slice containing the next n bytes from the buffer, +// advancing the buffer as if the bytes had been returned by Read. +// If there are fewer than n bytes in the buffer, Next returns the entire buffer. +// The slice is only valid until the next call to a read or write method. +func (b *Buffer) Next(n int) []byte { + m := b.Len() + if n > m { + n = m + } + data := b.buf[b.off : b.off+n] + b.off += n + return data +} + +// ReadByte reads and returns the next byte from the buffer. +// If no byte is available, it returns error io.EOF. +func (b *Buffer) ReadByte() (c byte, err error) { + if b.off >= len(b.buf) { + // Buffer is empty, reset to recover space. + b.Truncate(0) + return 0, io.EOF + } + c = b.buf[b.off] + b.off++ + return c, nil +} + +// ReadRune reads and returns the next UTF-8-encoded +// Unicode code point from the buffer. +// If no bytes are available, the error returned is io.EOF. +// If the bytes are an erroneous UTF-8 encoding, it +// consumes one byte and returns U+FFFD, 1. +func (b *Buffer) ReadRune() (r rune, size int, err error) { + if b.off >= len(b.buf) { + // Buffer is empty, reset to recover space. + b.Truncate(0) + return 0, 0, io.EOF + } + c := b.buf[b.off] + if c < utf8.RuneSelf { + b.off++ + return rune(c), 1, nil + } + r, n := utf8.DecodeRune(b.buf[b.off:]) + b.off += n + return r, n, nil +} + +// ReadBytes reads until the first occurrence of delim in the input, +// returning a slice containing the data up to and including the delimiter. +// If ReadBytes encounters an error before finding a delimiter, +// it returns the data read before the error and the error itself (often io.EOF). +// ReadBytes returns err != nil if and only if the returned data does not end in +// delim. +func (b *Buffer) ReadBytes(delim byte) (line []byte, err error) { + slice, err := b.readSlice(delim) + // return a copy of slice. The buffer's backing array may + // be overwritten by later calls. + line = append(line, slice...) + return +} + +// readSlice is like ReadBytes but returns a reference to internal buffer data. +func (b *Buffer) readSlice(delim byte) (line []byte, err error) { + i := bytes.IndexByte(b.buf[b.off:], delim) + end := b.off + i + 1 + if i < 0 { + end = len(b.buf) + err = io.EOF + } + line = b.buf[b.off:end] + b.off = end + return line, err +} + +// ReadString reads until the first occurrence of delim in the input, +// returning a string containing the data up to and including the delimiter. +// If ReadString encounters an error before finding a delimiter, +// it returns the data read before the error and the error itself (often io.EOF). +// ReadString returns err != nil if and only if the returned data does not end +// in delim. 
+func (b *Buffer) ReadString(delim byte) (line string, err error) { + slice, err := b.readSlice(delim) + return string(slice), err +} + +// NewBuffer creates and initializes a new Buffer using buf as its initial +// contents. It is intended to prepare a Buffer to read existing data. It +// can also be used to size the internal buffer for writing. To do that, +// buf should have the desired capacity but a length of zero. +// +// In most cases, new(Buffer) (or just declaring a Buffer variable) is +// sufficient to initialize a Buffer. +func NewBuffer(buf []byte) *Buffer { return &Buffer{buf: buf} } + +// NewBufferString creates and initializes a new Buffer using string s as its +// initial contents. It is intended to prepare a buffer to read an existing +// string. +// +// In most cases, new(Buffer) (or just declaring a Buffer variable) is +// sufficient to initialize a Buffer. +func NewBufferString(s string) *Buffer { + return &Buffer{buf: []byte(s)} +} diff --git a/vendor/github.com/pquerna/ffjson/fflib/v1/buffer_nopool.go b/vendor/github.com/pquerna/ffjson/fflib/v1/buffer_nopool.go new file mode 100644 index 0000000..b84af6f --- /dev/null +++ b/vendor/github.com/pquerna/ffjson/fflib/v1/buffer_nopool.go @@ -0,0 +1,11 @@ +// +build !go1.3 + +package v1 + +// Stub version of buffer_pool.go for Go 1.2, which doesn't have sync.Pool. + +func Pool(b []byte) {} + +func makeSlice(n int) []byte { + return make([]byte, n) +} diff --git a/vendor/github.com/pquerna/ffjson/fflib/v1/buffer_pool.go b/vendor/github.com/pquerna/ffjson/fflib/v1/buffer_pool.go new file mode 100644 index 0000000..a021c57 --- /dev/null +++ b/vendor/github.com/pquerna/ffjson/fflib/v1/buffer_pool.go @@ -0,0 +1,105 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.3 + +package v1 + +// Allocation pools for Buffers. + +import "sync" + +var pools [14]sync.Pool +var pool64 *sync.Pool + +func init() { + var i uint + // TODO(pquerna): add science here around actual pool sizes. + for i = 6; i < 20; i++ { + n := 1 << i + pools[poolNum(n)].New = func() interface{} { return make([]byte, 0, n) } + } + pool64 = &pools[0] +} + +// This returns the pool number that will give a buffer of +// at least 'i' bytes. +func poolNum(i int) int { + // TODO(pquerna): convert to log2 w/ bsr asm instruction: + // + if i <= 64 { + return 0 + } else if i <= 128 { + return 1 + } else if i <= 256 { + return 2 + } else if i <= 512 { + return 3 + } else if i <= 1024 { + return 4 + } else if i <= 2048 { + return 5 + } else if i <= 4096 { + return 6 + } else if i <= 8192 { + return 7 + } else if i <= 16384 { + return 8 + } else if i <= 32768 { + return 9 + } else if i <= 65536 { + return 10 + } else if i <= 131072 { + return 11 + } else if i <= 262144 { + return 12 + } else if i <= 524288 { + return 13 + } else { + return -1 + } +} + +// Send a buffer to the Pool to reuse for other instances. +// You may no longer utilize the content of the buffer, since it may be used +// by other goroutines. +func Pool(b []byte) { + if b == nil { + return + } + c := cap(b) + + // Our smallest buffer is 64 bytes, so we discard smaller buffers. + if c < 64 { + return + } + + // We need to put the incoming buffer into the NEXT buffer, + // since a buffer guarantees AT LEAST the number of bytes available + // that is the top of this buffer. + // That is the reason for dividing the cap by 2, so it gets into the NEXT bucket. 
+	// We add 2 to avoid rounding down if size is exactly power of 2.
+	pn := poolNum((c + 2) >> 1)
+	if pn != -1 {
+		pools[pn].Put(b[0:0])
+	}
+	// if we didn't have a slot for this []byte, we just drop it and let the GC
+	// take care of it.
+}
+
+// makeSlice allocates a slice of size n -- it will attempt to use a pool'ed
+// instance whenever possible.
+func makeSlice(n int) []byte {
+	if n <= 64 {
+		return pool64.Get().([]byte)[0:n]
+	}
+
+	pn := poolNum(n)
+
+	if pn != -1 {
+		return pools[pn].Get().([]byte)[0:n]
+	} else {
+		return make([]byte, n)
+	}
+}
diff --git a/vendor/github.com/pquerna/ffjson/fflib/v1/bytenum.go b/vendor/github.com/pquerna/ffjson/fflib/v1/bytenum.go
new file mode 100644
index 0000000..0847740
--- /dev/null
+++ b/vendor/github.com/pquerna/ffjson/fflib/v1/bytenum.go
@@ -0,0 +1,88 @@
+/**
+ * Copyright 2014 Paul Querna
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+/* Portions of this file are based on Go stdlib's strconv/atoi.go */
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package v1
+
+import (
+	"github.com/pquerna/ffjson/fflib/v1/internal"
+)
+
+func ParseFloat(s []byte, bitSize int) (f float64, err error) {
+	return internal.ParseFloat(s, bitSize)
+}
+
+// ParseUint is like ParseInt but for unsigned numbers, operating on []byte.
+func ParseUint(s []byte, base int, bitSize int) (n uint64, err error) {
+	if len(s) == 1 {
+		switch s[0] {
+		case '0':
+			return 0, nil
+		case '1':
+			return 1, nil
+		case '2':
+			return 2, nil
+		case '3':
+			return 3, nil
+		case '4':
+			return 4, nil
+		case '5':
+			return 5, nil
+		case '6':
+			return 6, nil
+		case '7':
+			return 7, nil
+		case '8':
+			return 8, nil
+		case '9':
+			return 9, nil
+		}
+	}
+	return internal.ParseUint(s, base, bitSize)
+}
+
+func ParseInt(s []byte, base int, bitSize int) (i int64, err error) {
+	if len(s) == 1 {
+		switch s[0] {
+		case '0':
+			return 0, nil
+		case '1':
+			return 1, nil
+		case '2':
+			return 2, nil
+		case '3':
+			return 3, nil
+		case '4':
+			return 4, nil
+		case '5':
+			return 5, nil
+		case '6':
+			return 6, nil
+		case '7':
+			return 7, nil
+		case '8':
+			return 8, nil
+		case '9':
+			return 9, nil
+		}
+	}
+	return internal.ParseInt(s, base, bitSize)
+}
diff --git a/vendor/github.com/pquerna/ffjson/fflib/v1/decimal.go b/vendor/github.com/pquerna/ffjson/fflib/v1/decimal.go
new file mode 100644
index 0000000..069df7a
--- /dev/null
+++ b/vendor/github.com/pquerna/ffjson/fflib/v1/decimal.go
@@ -0,0 +1,378 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Multiprecision decimal numbers.
+// For floating-point formatting only; not general purpose.
+// Only operations are assign and (binary) left/right shift.
+// Can do binary floating point in multiprecision decimal precisely
+// because 2 divides 10; cannot do decimal floating point
+// in multiprecision binary precisely.
+
+package v1
+
+type decimal struct {
+	d     [800]byte // digits
+	nd    int       // number of digits used
+	dp    int       // decimal point
+	neg   bool
+	trunc bool // discarded nonzero digits beyond d[:nd]
+}
+
+func (a *decimal) String() string {
+	n := 10 + a.nd
+	if a.dp > 0 {
+		n += a.dp
+	}
+	if a.dp < 0 {
+		n += -a.dp
+	}
+
+	buf := make([]byte, n)
+	w := 0
+	switch {
+	case a.nd == 0:
+		return "0"
+
+	case a.dp <= 0:
+		// zeros fill space between decimal point and digits
+		buf[w] = '0'
+		w++
+		buf[w] = '.'
+		w++
+		w += digitZero(buf[w : w+-a.dp])
+		w += copy(buf[w:], a.d[0:a.nd])
+
+	case a.dp < a.nd:
+		// decimal point in middle of digits
+		w += copy(buf[w:], a.d[0:a.dp])
+		buf[w] = '.'
+		w++
+		w += copy(buf[w:], a.d[a.dp:a.nd])
+
+	default:
+		// zeros fill space between digits and decimal point
+		w += copy(buf[w:], a.d[0:a.nd])
+		w += digitZero(buf[w : w+a.dp-a.nd])
+	}
+	return string(buf[0:w])
+}
+
+func digitZero(dst []byte) int {
+	for i := range dst {
+		dst[i] = '0'
+	}
+	return len(dst)
+}
+
+// trim trailing zeros from number.
+// (They are meaningless; the decimal point is tracked
+// independent of the number of digits.)
+func trim(a *decimal) {
+	for a.nd > 0 && a.d[a.nd-1] == '0' {
+		a.nd--
+	}
+	if a.nd == 0 {
+		a.dp = 0
+	}
+}
+
+// Assign v to a.
+func (a *decimal) Assign(v uint64) {
+	var buf [24]byte
+
+	// Write reversed decimal in buf.
+	n := 0
+	for v > 0 {
+		v1 := v / 10
+		v -= 10 * v1
+		buf[n] = byte(v + '0')
+		n++
+		v = v1
+	}
+
+	// Reverse again to produce forward decimal in a.d.
+	a.nd = 0
+	for n--; n >= 0; n-- {
+		a.d[a.nd] = buf[n]
+		a.nd++
+	}
+	a.dp = a.nd
+	trim(a)
+}
+
+// Maximum shift that we can do in one pass without overflow.
+// Signed int has 31 bits, and we have to be able to accommodate 9<<k.
+const maxShift = 27
+
+// Binary shift right (/ 2) by k bits.  k <= maxShift to avoid overflow.
+func rightShift(a *decimal, k uint) {
+	r := 0 // read pointer
+	w := 0 // write pointer
+
+	// Pick up enough leading digits to cover first shift.
+	var n int
+	for ; n>>k == 0; r++ {
+		if r >= a.nd {
+			if n == 0 {
+				// a == 0; shouldn't get here, but handle anyway.
+				a.nd = 0
+				return
+			}
+			for n>>k == 0 {
+				n = n * 10
+				r++
+			}
+			break
+		}
+		c := int(a.d[r])
+		n = n*10 + c - '0'
+	}
+	a.dp -= r - 1
+
+	// Pick up a digit, put down a digit.
+	for ; r < a.nd; r++ {
+		c := int(a.d[r])
+		dig := n >> k
+		n -= dig << k
+		a.d[w] = byte(dig + '0')
+		w++
+		n = n*10 + c - '0'
+	}
+
+	// Put down extra digits.
+	for n > 0 {
+		dig := n >> k
+		n -= dig << k
+		if w < len(a.d) {
+			a.d[w] = byte(dig + '0')
+			w++
+		} else if dig > 0 {
+			a.trunc = true
+		}
+		n = n * 10
+	}
+
+	a.nd = w
+	trim(a)
+}
+
+// Cheat sheet for left shift: table indexed by shift count giving
+// number of new digits that will be introduced by that shift.
+//
+// For example, leftcheats[4] = {2, "625"}. That means that
+// if we are shifting by 4 (multiplying by 16), it will add 2 digits
+// when the string prefix is "625" through "999", and one fewer digit
+// if the string prefix is "000" through "624".
+//
+// Credit for this trick goes to Ken.
+
+type leftCheat struct {
+	delta  int    // number of new digits
+	cutoff string // minus one digit if original < a.
+}
+
+var leftcheats = []leftCheat{
+	// Leading digits of 1/2^i = 5^i.
+	// 5^23 is not an exact 64-bit floating point number,
+	// so have to use bc for the math.
+ /* + seq 27 | sed 's/^/5^/' | bc | + awk 'BEGIN{ print "\tleftCheat{ 0, \"\" }," } + { + log2 = log(2)/log(10) + printf("\tleftCheat{ %d, \"%s\" },\t// * %d\n", + int(log2*NR+1), $0, 2**NR) + }' + */ + {0, ""}, + {1, "5"}, // * 2 + {1, "25"}, // * 4 + {1, "125"}, // * 8 + {2, "625"}, // * 16 + {2, "3125"}, // * 32 + {2, "15625"}, // * 64 + {3, "78125"}, // * 128 + {3, "390625"}, // * 256 + {3, "1953125"}, // * 512 + {4, "9765625"}, // * 1024 + {4, "48828125"}, // * 2048 + {4, "244140625"}, // * 4096 + {4, "1220703125"}, // * 8192 + {5, "6103515625"}, // * 16384 + {5, "30517578125"}, // * 32768 + {5, "152587890625"}, // * 65536 + {6, "762939453125"}, // * 131072 + {6, "3814697265625"}, // * 262144 + {6, "19073486328125"}, // * 524288 + {7, "95367431640625"}, // * 1048576 + {7, "476837158203125"}, // * 2097152 + {7, "2384185791015625"}, // * 4194304 + {7, "11920928955078125"}, // * 8388608 + {8, "59604644775390625"}, // * 16777216 + {8, "298023223876953125"}, // * 33554432 + {8, "1490116119384765625"}, // * 67108864 + {9, "7450580596923828125"}, // * 134217728 +} + +// Is the leading prefix of b lexicographically less than s? +func prefixIsLessThan(b []byte, s string) bool { + for i := 0; i < len(s); i++ { + if i >= len(b) { + return true + } + if b[i] != s[i] { + return b[i] < s[i] + } + } + return false +} + +// Binary shift left (/ 2) by k bits. k <= maxShift to avoid overflow. +func leftShift(a *decimal, k uint) { + delta := leftcheats[k].delta + if prefixIsLessThan(a.d[0:a.nd], leftcheats[k].cutoff) { + delta-- + } + + r := a.nd // read index + w := a.nd + delta // write index + n := 0 + + // Pick up a digit, put down a digit. + for r--; r >= 0; r-- { + n += (int(a.d[r]) - '0') << k + quo := n / 10 + rem := n - 10*quo + w-- + if w < len(a.d) { + a.d[w] = byte(rem + '0') + } else if rem != 0 { + a.trunc = true + } + n = quo + } + + // Put down extra digits. + for n > 0 { + quo := n / 10 + rem := n - 10*quo + w-- + if w < len(a.d) { + a.d[w] = byte(rem + '0') + } else if rem != 0 { + a.trunc = true + } + n = quo + } + + a.nd += delta + if a.nd >= len(a.d) { + a.nd = len(a.d) + } + a.dp += delta + trim(a) +} + +// Binary shift left (k > 0) or right (k < 0). +func (a *decimal) Shift(k int) { + switch { + case a.nd == 0: + // nothing to do: a == 0 + case k > 0: + for k > maxShift { + leftShift(a, maxShift) + k -= maxShift + } + leftShift(a, uint(k)) + case k < 0: + for k < -maxShift { + rightShift(a, maxShift) + k += maxShift + } + rightShift(a, uint(-k)) + } +} + +// If we chop a at nd digits, should we round up? +func shouldRoundUp(a *decimal, nd int) bool { + if nd < 0 || nd >= a.nd { + return false + } + if a.d[nd] == '5' && nd+1 == a.nd { // exactly halfway - round to even + // if we truncated, a little higher than what's recorded - always round up + if a.trunc { + return true + } + return nd > 0 && (a.d[nd-1]-'0')%2 != 0 + } + // not halfway - digit tells all + return a.d[nd] >= '5' +} + +// Round a to nd digits (or fewer). +// If nd is zero, it means we're rounding +// just to the left of the digits, as in +// 0.09 -> 0.1. +func (a *decimal) Round(nd int) { + if nd < 0 || nd >= a.nd { + return + } + if shouldRoundUp(a, nd) { + a.RoundUp(nd) + } else { + a.RoundDown(nd) + } +} + +// Round a down to nd digits (or fewer). +func (a *decimal) RoundDown(nd int) { + if nd < 0 || nd >= a.nd { + return + } + a.nd = nd + trim(a) +} + +// Round a up to nd digits (or fewer). 
+func (a *decimal) RoundUp(nd int) { + if nd < 0 || nd >= a.nd { + return + } + + // round up + for i := nd - 1; i >= 0; i-- { + c := a.d[i] + if c < '9' { // can stop after this digit + a.d[i]++ + a.nd = i + 1 + return + } + } + + // Number is all 9s. + // Change to single 1 with adjusted decimal point. + a.d[0] = '1' + a.nd = 1 + a.dp++ +} + +// Extract integer part, rounded appropriately. +// No guarantees about overflow. +func (a *decimal) RoundedInteger() uint64 { + if a.dp > 20 { + return 0xFFFFFFFFFFFFFFFF + } + var i int + n := uint64(0) + for i = 0; i < a.dp && i < a.nd; i++ { + n = n*10 + uint64(a.d[i]-'0') + } + for ; i < a.dp; i++ { + n *= 10 + } + if shouldRoundUp(a, a.dp) { + n++ + } + return n +} diff --git a/vendor/github.com/pquerna/ffjson/fflib/v1/extfloat.go b/vendor/github.com/pquerna/ffjson/fflib/v1/extfloat.go new file mode 100644 index 0000000..508ddc6 --- /dev/null +++ b/vendor/github.com/pquerna/ffjson/fflib/v1/extfloat.go @@ -0,0 +1,668 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package v1 + +// An extFloat represents an extended floating-point number, with more +// precision than a float64. It does not try to save bits: the +// number represented by the structure is mant*(2^exp), with a negative +// sign if neg is true. +type extFloat struct { + mant uint64 + exp int + neg bool +} + +// Powers of ten taken from double-conversion library. +// http://code.google.com/p/double-conversion/ +const ( + firstPowerOfTen = -348 + stepPowerOfTen = 8 +) + +var smallPowersOfTen = [...]extFloat{ + {1 << 63, -63, false}, // 1 + {0xa << 60, -60, false}, // 1e1 + {0x64 << 57, -57, false}, // 1e2 + {0x3e8 << 54, -54, false}, // 1e3 + {0x2710 << 50, -50, false}, // 1e4 + {0x186a0 << 47, -47, false}, // 1e5 + {0xf4240 << 44, -44, false}, // 1e6 + {0x989680 << 40, -40, false}, // 1e7 +} + +var powersOfTen = [...]extFloat{ + {0xfa8fd5a0081c0288, -1220, false}, // 10^-348 + {0xbaaee17fa23ebf76, -1193, false}, // 10^-340 + {0x8b16fb203055ac76, -1166, false}, // 10^-332 + {0xcf42894a5dce35ea, -1140, false}, // 10^-324 + {0x9a6bb0aa55653b2d, -1113, false}, // 10^-316 + {0xe61acf033d1a45df, -1087, false}, // 10^-308 + {0xab70fe17c79ac6ca, -1060, false}, // 10^-300 + {0xff77b1fcbebcdc4f, -1034, false}, // 10^-292 + {0xbe5691ef416bd60c, -1007, false}, // 10^-284 + {0x8dd01fad907ffc3c, -980, false}, // 10^-276 + {0xd3515c2831559a83, -954, false}, // 10^-268 + {0x9d71ac8fada6c9b5, -927, false}, // 10^-260 + {0xea9c227723ee8bcb, -901, false}, // 10^-252 + {0xaecc49914078536d, -874, false}, // 10^-244 + {0x823c12795db6ce57, -847, false}, // 10^-236 + {0xc21094364dfb5637, -821, false}, // 10^-228 + {0x9096ea6f3848984f, -794, false}, // 10^-220 + {0xd77485cb25823ac7, -768, false}, // 10^-212 + {0xa086cfcd97bf97f4, -741, false}, // 10^-204 + {0xef340a98172aace5, -715, false}, // 10^-196 + {0xb23867fb2a35b28e, -688, false}, // 10^-188 + {0x84c8d4dfd2c63f3b, -661, false}, // 10^-180 + {0xc5dd44271ad3cdba, -635, false}, // 10^-172 + {0x936b9fcebb25c996, -608, false}, // 10^-164 + {0xdbac6c247d62a584, -582, false}, // 10^-156 + {0xa3ab66580d5fdaf6, -555, false}, // 10^-148 + {0xf3e2f893dec3f126, -529, false}, // 10^-140 + {0xb5b5ada8aaff80b8, -502, false}, // 10^-132 + {0x87625f056c7c4a8b, -475, false}, // 10^-124 + {0xc9bcff6034c13053, -449, false}, // 10^-116 + {0x964e858c91ba2655, -422, false}, // 10^-108 + {0xdff9772470297ebd, -396, false}, // 10^-100 + {0xa6dfbd9fb8e5b88f, 
-369, false}, // 10^-92 + {0xf8a95fcf88747d94, -343, false}, // 10^-84 + {0xb94470938fa89bcf, -316, false}, // 10^-76 + {0x8a08f0f8bf0f156b, -289, false}, // 10^-68 + {0xcdb02555653131b6, -263, false}, // 10^-60 + {0x993fe2c6d07b7fac, -236, false}, // 10^-52 + {0xe45c10c42a2b3b06, -210, false}, // 10^-44 + {0xaa242499697392d3, -183, false}, // 10^-36 + {0xfd87b5f28300ca0e, -157, false}, // 10^-28 + {0xbce5086492111aeb, -130, false}, // 10^-20 + {0x8cbccc096f5088cc, -103, false}, // 10^-12 + {0xd1b71758e219652c, -77, false}, // 10^-4 + {0x9c40000000000000, -50, false}, // 10^4 + {0xe8d4a51000000000, -24, false}, // 10^12 + {0xad78ebc5ac620000, 3, false}, // 10^20 + {0x813f3978f8940984, 30, false}, // 10^28 + {0xc097ce7bc90715b3, 56, false}, // 10^36 + {0x8f7e32ce7bea5c70, 83, false}, // 10^44 + {0xd5d238a4abe98068, 109, false}, // 10^52 + {0x9f4f2726179a2245, 136, false}, // 10^60 + {0xed63a231d4c4fb27, 162, false}, // 10^68 + {0xb0de65388cc8ada8, 189, false}, // 10^76 + {0x83c7088e1aab65db, 216, false}, // 10^84 + {0xc45d1df942711d9a, 242, false}, // 10^92 + {0x924d692ca61be758, 269, false}, // 10^100 + {0xda01ee641a708dea, 295, false}, // 10^108 + {0xa26da3999aef774a, 322, false}, // 10^116 + {0xf209787bb47d6b85, 348, false}, // 10^124 + {0xb454e4a179dd1877, 375, false}, // 10^132 + {0x865b86925b9bc5c2, 402, false}, // 10^140 + {0xc83553c5c8965d3d, 428, false}, // 10^148 + {0x952ab45cfa97a0b3, 455, false}, // 10^156 + {0xde469fbd99a05fe3, 481, false}, // 10^164 + {0xa59bc234db398c25, 508, false}, // 10^172 + {0xf6c69a72a3989f5c, 534, false}, // 10^180 + {0xb7dcbf5354e9bece, 561, false}, // 10^188 + {0x88fcf317f22241e2, 588, false}, // 10^196 + {0xcc20ce9bd35c78a5, 614, false}, // 10^204 + {0x98165af37b2153df, 641, false}, // 10^212 + {0xe2a0b5dc971f303a, 667, false}, // 10^220 + {0xa8d9d1535ce3b396, 694, false}, // 10^228 + {0xfb9b7cd9a4a7443c, 720, false}, // 10^236 + {0xbb764c4ca7a44410, 747, false}, // 10^244 + {0x8bab8eefb6409c1a, 774, false}, // 10^252 + {0xd01fef10a657842c, 800, false}, // 10^260 + {0x9b10a4e5e9913129, 827, false}, // 10^268 + {0xe7109bfba19c0c9d, 853, false}, // 10^276 + {0xac2820d9623bf429, 880, false}, // 10^284 + {0x80444b5e7aa7cf85, 907, false}, // 10^292 + {0xbf21e44003acdd2d, 933, false}, // 10^300 + {0x8e679c2f5e44ff8f, 960, false}, // 10^308 + {0xd433179d9c8cb841, 986, false}, // 10^316 + {0x9e19db92b4e31ba9, 1013, false}, // 10^324 + {0xeb96bf6ebadf77d9, 1039, false}, // 10^332 + {0xaf87023b9bf0ee6b, 1066, false}, // 10^340 +} + +// floatBits returns the bits of the float64 that best approximates +// the extFloat passed as receiver. Overflow is set to true if +// the resulting float64 is ±Inf. +func (f *extFloat) floatBits(flt *floatInfo) (bits uint64, overflow bool) { + f.Normalize() + + exp := f.exp + 63 + + // Exponent too small. + if exp < flt.bias+1 { + n := flt.bias + 1 - exp + f.mant >>= uint(n) + exp += n + } + + // Extract 1+flt.mantbits bits from the 64-bit mantissa. + mant := f.mant >> (63 - flt.mantbits) + if f.mant&(1<<(62-flt.mantbits)) != 0 { + // Round up. + mant += 1 + } + + // Rounding might have added a bit; shift down. + if mant == 2<>= 1 + exp++ + } + + // Infinities. 
+ if exp-flt.bias >= 1<>uint(-f.exp))<>= uint(-f.exp) + f.exp = 0 + return *f, *f + } + expBiased := exp - flt.bias + + upper = extFloat{mant: 2*f.mant + 1, exp: f.exp - 1, neg: f.neg} + if mant != 1<>(64-32) == 0 { + mant <<= 32 + exp -= 32 + } + if mant>>(64-16) == 0 { + mant <<= 16 + exp -= 16 + } + if mant>>(64-8) == 0 { + mant <<= 8 + exp -= 8 + } + if mant>>(64-4) == 0 { + mant <<= 4 + exp -= 4 + } + if mant>>(64-2) == 0 { + mant <<= 2 + exp -= 2 + } + if mant>>(64-1) == 0 { + mant <<= 1 + exp -= 1 + } + shift = uint(f.exp - exp) + f.mant, f.exp = mant, exp + return +} + +// Multiply sets f to the product f*g: the result is correctly rounded, +// but not normalized. +func (f *extFloat) Multiply(g extFloat) { + fhi, flo := f.mant>>32, uint64(uint32(f.mant)) + ghi, glo := g.mant>>32, uint64(uint32(g.mant)) + + // Cross products. + cross1 := fhi * glo + cross2 := flo * ghi + + // f.mant*g.mant is fhi*ghi << 64 + (cross1+cross2) << 32 + flo*glo + f.mant = fhi*ghi + (cross1 >> 32) + (cross2 >> 32) + rem := uint64(uint32(cross1)) + uint64(uint32(cross2)) + ((flo * glo) >> 32) + // Round up. + rem += (1 << 31) + + f.mant += (rem >> 32) + f.exp = f.exp + g.exp + 64 +} + +var uint64pow10 = [...]uint64{ + 1, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, + 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19, +} + +// AssignDecimal sets f to an approximate value mantissa*10^exp. It +// returns true if the value represented by f is guaranteed to be the +// best approximation of d after being rounded to a float64 or +// float32 depending on flt. +func (f *extFloat) AssignDecimal(mantissa uint64, exp10 int, neg bool, trunc bool, flt *floatInfo) (ok bool) { + const uint64digits = 19 + const errorscale = 8 + errors := 0 // An upper bound for error, computed in errorscale*ulp. + if trunc { + // the decimal number was truncated. + errors += errorscale / 2 + } + + f.mant = mantissa + f.exp = 0 + f.neg = neg + + // Multiply by powers of ten. + i := (exp10 - firstPowerOfTen) / stepPowerOfTen + if exp10 < firstPowerOfTen || i >= len(powersOfTen) { + return false + } + adjExp := (exp10 - firstPowerOfTen) % stepPowerOfTen + + // We multiply by exp%step + if adjExp < uint64digits && mantissa < uint64pow10[uint64digits-adjExp] { + // We can multiply the mantissa exactly. + f.mant *= uint64pow10[adjExp] + f.Normalize() + } else { + f.Normalize() + f.Multiply(smallPowersOfTen[adjExp]) + errors += errorscale / 2 + } + + // We multiply by 10 to the exp - exp%step. + f.Multiply(powersOfTen[i]) + if errors > 0 { + errors += 1 + } + errors += errorscale / 2 + + // Normalize + shift := f.Normalize() + errors <<= shift + + // Now f is a good approximation of the decimal. + // Check whether the error is too large: that is, if the mantissa + // is perturbated by the error, the resulting float64 will change. + // The 64 bits mantissa is 1 + 52 bits for float64 + 11 extra bits. + // + // In many cases the approximation will be good enough. + denormalExp := flt.bias - 63 + var extrabits uint + if f.exp <= denormalExp { + // f.mant * 2^f.exp is smaller than 2^(flt.bias+1). + extrabits = uint(63 - flt.mantbits + 1 + uint(denormalExp-f.exp)) + } else { + extrabits = uint(63 - flt.mantbits) + } + + halfway := uint64(1) << (extrabits - 1) + mant_extra := f.mant & (1< expMax: + i-- + default: + break Loop + } + } + // Apply the desired decimal shift on f. It will have exponent + // in the desired range. This is multiplication by 10^-exp10. 
+ f.Multiply(powersOfTen[i]) + + return -(firstPowerOfTen + i*stepPowerOfTen), i +} + +// frexp10Many applies a common shift by a power of ten to a, b, c. +func frexp10Many(a, b, c *extFloat) (exp10 int) { + exp10, i := c.frexp10() + a.Multiply(powersOfTen[i]) + b.Multiply(powersOfTen[i]) + return +} + +// FixedDecimal stores in d the first n significant digits +// of the decimal representation of f. It returns false +// if it cannot be sure of the answer. +func (f *extFloat) FixedDecimal(d *decimalSlice, n int) bool { + if f.mant == 0 { + d.nd = 0 + d.dp = 0 + d.neg = f.neg + return true + } + if n == 0 { + panic("strconv: internal error: extFloat.FixedDecimal called with n == 0") + } + // Multiply by an appropriate power of ten to have a reasonable + // number to process. + f.Normalize() + exp10, _ := f.frexp10() + + shift := uint(-f.exp) + integer := uint32(f.mant >> shift) + fraction := f.mant - (uint64(integer) << shift) + ε := uint64(1) // ε is the uncertainty we have on the mantissa of f. + + // Write exactly n digits to d. + needed := n // how many digits are left to write. + integerDigits := 0 // the number of decimal digits of integer. + pow10 := uint64(1) // the power of ten by which f was scaled. + for i, pow := 0, uint64(1); i < 20; i++ { + if pow > uint64(integer) { + integerDigits = i + break + } + pow *= 10 + } + rest := integer + if integerDigits > needed { + // the integral part is already large, trim the last digits. + pow10 = uint64pow10[integerDigits-needed] + integer /= uint32(pow10) + rest -= integer * uint32(pow10) + } else { + rest = 0 + } + + // Write the digits of integer: the digits of rest are omitted. + var buf [32]byte + pos := len(buf) + for v := integer; v > 0; { + v1 := v / 10 + v -= 10 * v1 + pos-- + buf[pos] = byte(v + '0') + v = v1 + } + for i := pos; i < len(buf); i++ { + d.d[i-pos] = buf[i] + } + nd := len(buf) - pos + d.nd = nd + d.dp = integerDigits + exp10 + needed -= nd + + if needed > 0 { + if rest != 0 || pow10 != 1 { + panic("strconv: internal error, rest != 0 but needed > 0") + } + // Emit digits for the fractional part. Each time, 10*fraction + // fits in a uint64 without overflow. + for needed > 0 { + fraction *= 10 + ε *= 10 // the uncertainty scales as we multiply by ten. + if 2*ε > 1<> shift + d.d[nd] = byte(digit + '0') + fraction -= digit << shift + nd++ + needed-- + } + d.nd = nd + } + + // We have written a truncation of f (a numerator / 10^d.dp). The remaining part + // can be interpreted as a small number (< 1) to be added to the last digit of the + // numerator. + // + // If rest > 0, the amount is: + // (rest< 0 guarantees that pow10 << shift does not overflow a uint64. + // + // If rest = 0, pow10 == 1 and the amount is + // fraction / (1 << shift) + // fraction being known with a ±ε uncertainty. + // + // We pass this information to the rounding routine for adjustment. + + ok := adjustLastDigitFixed(d, uint64(rest)<= 0; i-- { + if d.d[i] != '0' { + d.nd = i + 1 + break + } + } + return true +} + +// adjustLastDigitFixed assumes d contains the representation of the integral part +// of some number, whose fractional part is num / (den << shift). The numerator +// num is only known up to an uncertainty of size ε, assumed to be less than +// (den << shift)/2. +// +// It will increase the last digit by one to account for correct rounding, typically +// when the fractional part is greater than 1/2, and will return false if ε is such +// that no correct answer can be given. 
+func adjustLastDigitFixed(d *decimalSlice, num, den uint64, shift uint, ε uint64) bool { + if num > den< den< den< (den< den<= 0; i-- { + if d.d[i] == '9' { + d.nd-- + } else { + break + } + } + if i < 0 { + d.d[0] = '1' + d.nd = 1 + d.dp++ + } else { + d.d[i]++ + } + return true + } + return false +} + +// ShortestDecimal stores in d the shortest decimal representation of f +// which belongs to the open interval (lower, upper), where f is supposed +// to lie. It returns false whenever the result is unsure. The implementation +// uses the Grisu3 algorithm. +func (f *extFloat) ShortestDecimal(d *decimalSlice, lower, upper *extFloat) bool { + if f.mant == 0 { + d.nd = 0 + d.dp = 0 + d.neg = f.neg + return true + } + if f.exp == 0 && *lower == *f && *lower == *upper { + // an exact integer. + var buf [24]byte + n := len(buf) - 1 + for v := f.mant; v > 0; { + v1 := v / 10 + v -= 10 * v1 + buf[n] = byte(v + '0') + n-- + v = v1 + } + nd := len(buf) - n - 1 + for i := 0; i < nd; i++ { + d.d[i] = buf[n+1+i] + } + d.nd, d.dp = nd, nd + for d.nd > 0 && d.d[d.nd-1] == '0' { + d.nd-- + } + if d.nd == 0 { + d.dp = 0 + } + d.neg = f.neg + return true + } + upper.Normalize() + // Uniformize exponents. + if f.exp > upper.exp { + f.mant <<= uint(f.exp - upper.exp) + f.exp = upper.exp + } + if lower.exp > upper.exp { + lower.mant <<= uint(lower.exp - upper.exp) + lower.exp = upper.exp + } + + exp10 := frexp10Many(lower, f, upper) + // Take a safety margin due to rounding in frexp10Many, but we lose precision. + upper.mant++ + lower.mant-- + + // The shortest representation of f is either rounded up or down, but + // in any case, it is a truncation of upper. + shift := uint(-upper.exp) + integer := uint32(upper.mant >> shift) + fraction := upper.mant - (uint64(integer) << shift) + + // How far we can go down from upper until the result is wrong. + allowance := upper.mant - lower.mant + // How far we should go to get a very precise result. + targetDiff := upper.mant - f.mant + + // Count integral digits: there are at most 10. + var integerDigits int + for i, pow := 0, uint64(1); i < 20; i++ { + if pow > uint64(integer) { + integerDigits = i + break + } + pow *= 10 + } + for i := 0; i < integerDigits; i++ { + pow := uint64pow10[integerDigits-i-1] + digit := integer / uint32(pow) + d.d[i] = byte(digit + '0') + integer -= digit * uint32(pow) + // evaluate whether we should stop. + if currentDiff := uint64(integer)<> shift) + d.d[d.nd] = byte(digit + '0') + d.nd++ + fraction -= uint64(digit) << shift + if fraction < allowance*multiplier { + // We are in the admissible range. Note that if allowance is about to + // overflow, that is, allowance > 2^64/10, the condition is automatically + // true due to the limited range of fraction. + return adjustLastDigit(d, + fraction, targetDiff*multiplier, allowance*multiplier, + 1< maxDiff-ulpBinary { + // we went too far + return false + } + if d.nd == 1 && d.d[0] == '0' { + // the number has actually reached zero. + d.nd = 0 + d.dp = 0 + } + return true +} diff --git a/vendor/github.com/pquerna/ffjson/fflib/v1/fold.go b/vendor/github.com/pquerna/ffjson/fflib/v1/fold.go new file mode 100644 index 0000000..4d33e6f --- /dev/null +++ b/vendor/github.com/pquerna/ffjson/fflib/v1/fold.go @@ -0,0 +1,121 @@ +/** + * Copyright 2014 Paul Querna + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +/* Portions of this file are on Go stdlib's encoding/json/fold.go */ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package v1 + +import ( + "unicode/utf8" +) + +const ( + caseMask = ^byte(0x20) // Mask to ignore case in ASCII. + kelvin = '\u212a' + smallLongEss = '\u017f' +) + +// equalFoldRight is a specialization of bytes.EqualFold when s is +// known to be all ASCII (including punctuation), but contains an 's', +// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t. +// See comments on foldFunc. +func EqualFoldRight(s, t []byte) bool { + for _, sb := range s { + if len(t) == 0 { + return false + } + tb := t[0] + if tb < utf8.RuneSelf { + if sb != tb { + sbUpper := sb & caseMask + if 'A' <= sbUpper && sbUpper <= 'Z' { + if sbUpper != tb&caseMask { + return false + } + } else { + return false + } + } + t = t[1:] + continue + } + // sb is ASCII and t is not. t must be either kelvin + // sign or long s; sb must be s, S, k, or K. + tr, size := utf8.DecodeRune(t) + switch sb { + case 's', 'S': + if tr != smallLongEss { + return false + } + case 'k', 'K': + if tr != kelvin { + return false + } + default: + return false + } + t = t[size:] + + } + if len(t) > 0 { + return false + } + return true +} + +// asciiEqualFold is a specialization of bytes.EqualFold for use when +// s is all ASCII (but may contain non-letters) and contains no +// special-folding letters. +// See comments on foldFunc. +func AsciiEqualFold(s, t []byte) bool { + if len(s) != len(t) { + return false + } + for i, sb := range s { + tb := t[i] + if sb == tb { + continue + } + if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') { + if sb&caseMask != tb&caseMask { + return false + } + } else { + return false + } + } + return true +} + +// simpleLetterEqualFold is a specialization of bytes.EqualFold for +// use when s is all ASCII letters (no underscores, etc) and also +// doesn't contain 'k', 'K', 's', or 'S'. +// See comments on foldFunc. +func SimpleLetterEqualFold(s, t []byte) bool { + if len(s) != len(t) { + return false + } + for i, b := range s { + if b&caseMask != t[i]&caseMask { + return false + } + } + return true +} diff --git a/vendor/github.com/pquerna/ffjson/fflib/v1/ftoa.go b/vendor/github.com/pquerna/ffjson/fflib/v1/ftoa.go new file mode 100644 index 0000000..360d6db --- /dev/null +++ b/vendor/github.com/pquerna/ffjson/fflib/v1/ftoa.go @@ -0,0 +1,542 @@ +package v1 + +/** + * Copyright 2015 Paul Querna, Klaus Post + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +/* Most of this file are on Go stdlib's strconv/ftoa.go */ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +import "math" + +// TODO: move elsewhere? +type floatInfo struct { + mantbits uint + expbits uint + bias int +} + +var optimize = true // can change for testing + +var float32info = floatInfo{23, 8, -127} +var float64info = floatInfo{52, 11, -1023} + +// AppendFloat appends the string form of the floating-point number f, +// as generated by FormatFloat +func AppendFloat(dst EncodingBuffer, val float64, fmt byte, prec, bitSize int) { + var bits uint64 + var flt *floatInfo + switch bitSize { + case 32: + bits = uint64(math.Float32bits(float32(val))) + flt = &float32info + case 64: + bits = math.Float64bits(val) + flt = &float64info + default: + panic("strconv: illegal AppendFloat/FormatFloat bitSize") + } + + neg := bits>>(flt.expbits+flt.mantbits) != 0 + exp := int(bits>>flt.mantbits) & (1< digs.nd && digs.nd >= digs.dp { + eprec = digs.nd + } + // %e is used if the exponent from the conversion + // is less than -4 or greater than or equal to the precision. + // if precision was the shortest possible, use precision 6 for this decision. + if shortest { + eprec = 6 + } + exp := digs.dp - 1 + if exp < -4 || exp >= eprec { + if prec > digs.nd { + prec = digs.nd + } + fmtE(dst, neg, digs, prec-1, fmt+'e'-'g') + return + } + if prec > digs.dp { + prec = digs.nd + } + fmtF(dst, neg, digs, max(prec-digs.dp, 0)) + return + } + + // unknown format + dst.Write([]byte{'%', fmt}) + return +} + +// Round d (= mant * 2^exp) to the shortest number of digits +// that will let the original floating point value be precisely +// reconstructed. Size is original floating point size (64 or 32). +func roundShortest(d *decimal, mant uint64, exp int, flt *floatInfo) { + // If mantissa is zero, the number is zero; stop now. + if mant == 0 { + d.nd = 0 + return + } + + // Compute upper and lower such that any decimal number + // between upper and lower (possibly inclusive) + // will round to the original floating point number. + + // We may see at once that the number is already shortest. + // + // Suppose d is not denormal, so that 2^exp <= d < 10^dp. + // The closest shorter number is at least 10^(dp-nd) away. + // The lower/upper bounds computed below are at distance + // at most 2^(exp-mantbits). + // + // So the number is already shortest if 10^(dp-nd) > 2^(exp-mantbits), + // or equivalently log2(10)*(dp-nd) > exp-mantbits. + // It is true if 332/100*(dp-nd) >= exp-mantbits (log2(10) > 3.32). + minexp := flt.bias + 1 // minimum possible exponent + if exp > minexp && 332*(d.dp-d.nd) >= 100*(exp-int(flt.mantbits)) { + // The number is already shortest. + return + } + + // d = mant << (exp - mantbits) + // Next highest floating point number is mant+1 << exp-mantbits. + // Our upper bound is halfway between, mant*2+1 << exp-mantbits-1. + upper := new(decimal) + upper.Assign(mant*2 + 1) + upper.Shift(exp - int(flt.mantbits) - 1) + + // d = mant << (exp - mantbits) + // Next lowest floating point number is mant-1 << exp-mantbits, + // unless mant-1 drops the significant bit and exp is not the minimum exp, + // in which case the next lowest is mant*2-1 << exp-mantbits-1. + // Either way, call it mantlo << explo-mantbits. 
+ // Our lower bound is halfway between, mantlo*2+1 << explo-mantbits-1. + var mantlo uint64 + var explo int + if mant > 1< 0 { + dst.WriteByte('.') + i := 1 + m := min(d.nd, prec+1) + if i < m { + dst.Write(d.d[i:m]) + i = m + } + for i <= prec { + dst.WriteByte('0') + i++ + } + } + + // e± + dst.WriteByte(fmt) + exp := d.dp - 1 + if d.nd == 0 { // special case: 0 has exponent 0 + exp = 0 + } + if exp < 0 { + ch = '-' + exp = -exp + } else { + ch = '+' + } + dst.WriteByte(ch) + + // dd or ddd + switch { + case exp < 10: + dst.WriteByte('0') + dst.WriteByte(byte(exp) + '0') + case exp < 100: + dst.WriteByte(byte(exp/10) + '0') + dst.WriteByte(byte(exp%10) + '0') + default: + dst.WriteByte(byte(exp/100) + '0') + dst.WriteByte(byte(exp/10)%10 + '0') + dst.WriteByte(byte(exp%10) + '0') + } + + return +} + +// %f: -ddddddd.ddddd +func fmtF(dst EncodingBuffer, neg bool, d decimalSlice, prec int) { + // sign + if neg { + dst.WriteByte('-') + } + + // integer, padded with zeros as needed. + if d.dp > 0 { + m := min(d.nd, d.dp) + dst.Write(d.d[:m]) + for ; m < d.dp; m++ { + dst.WriteByte('0') + } + } else { + dst.WriteByte('0') + } + + // fraction + if prec > 0 { + dst.WriteByte('.') + for i := 0; i < prec; i++ { + ch := byte('0') + if j := d.dp + i; 0 <= j && j < d.nd { + ch = d.d[j] + } + dst.WriteByte(ch) + } + } + + return +} + +// %b: -ddddddddp±ddd +func fmtB(dst EncodingBuffer, neg bool, mant uint64, exp int, flt *floatInfo) { + // sign + if neg { + dst.WriteByte('-') + } + + // mantissa + formatBits(dst, mant, 10, false) + + // p + dst.WriteByte('p') + + // ±exponent + exp -= int(flt.mantbits) + if exp >= 0 { + dst.WriteByte('+') + } + formatBits(dst, uint64(exp), 10, exp < 0) + + return +} + +func min(a, b int) int { + if a < b { + return a + } + return b +} + +func max(a, b int) int { + if a > b { + return a + } + return b +} + +// formatBits computes the string representation of u in the given base. +// If neg is set, u is treated as negative int64 value. 
+func formatBits(dst EncodingBuffer, u uint64, base int, neg bool) { + if base < 2 || base > len(digits) { + panic("strconv: illegal AppendInt/FormatInt base") + } + // 2 <= base && base <= len(digits) + + var a [64 + 1]byte // +1 for sign of 64bit value in base 2 + i := len(a) + + if neg { + u = -u + } + + // convert bits + if base == 10 { + // common case: use constants for / because + // the compiler can optimize it into a multiply+shift + + if ^uintptr(0)>>32 == 0 { + for u > uint64(^uintptr(0)) { + q := u / 1e9 + us := uintptr(u - q*1e9) // us % 1e9 fits into a uintptr + for j := 9; j > 0; j-- { + i-- + qs := us / 10 + a[i] = byte(us - qs*10 + '0') + us = qs + } + u = q + } + } + + // u guaranteed to fit into a uintptr + us := uintptr(u) + for us >= 10 { + i-- + q := us / 10 + a[i] = byte(us - q*10 + '0') + us = q + } + // u < 10 + i-- + a[i] = byte(us + '0') + + } else if s := shifts[base]; s > 0 { + // base is power of 2: use shifts and masks instead of / and % + b := uint64(base) + m := uintptr(b) - 1 // == 1<= b { + i-- + a[i] = digits[uintptr(u)&m] + u >>= s + } + // u < base + i-- + a[i] = digits[uintptr(u)] + + } else { + // general case + b := uint64(base) + for u >= b { + i-- + q := u / b + a[i] = digits[uintptr(u-q*b)] + u = q + } + // u < base + i-- + a[i] = digits[uintptr(u)] + } + + // add sign, if any + if neg { + i-- + a[i] = '-' + } + + dst.Write(a[i:]) +} diff --git a/vendor/github.com/pquerna/ffjson/fflib/v1/internal/atof.go b/vendor/github.com/pquerna/ffjson/fflib/v1/internal/atof.go new file mode 100644 index 0000000..46c1289 --- /dev/null +++ b/vendor/github.com/pquerna/ffjson/fflib/v1/internal/atof.go @@ -0,0 +1,936 @@ +/** + * Copyright 2014 Paul Querna + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +/* Portions of this file are on Go stdlib's strconv/atof.go */ + +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package internal + +// decimal to binary floating point conversion. +// Algorithm: +// 1) Store input in multiprecision decimal. +// 2) Multiply/divide decimal by powers of two until in range [0.5, 1) +// 3) Multiply by 2^precision and round to get mantissa. 
+ +import "math" + +var optimize = true // can change for testing + +func equalIgnoreCase(s1 []byte, s2 []byte) bool { + if len(s1) != len(s2) { + return false + } + for i := 0; i < len(s1); i++ { + c1 := s1[i] + if 'A' <= c1 && c1 <= 'Z' { + c1 += 'a' - 'A' + } + c2 := s2[i] + if 'A' <= c2 && c2 <= 'Z' { + c2 += 'a' - 'A' + } + if c1 != c2 { + return false + } + } + return true +} + +func special(s []byte) (f float64, ok bool) { + if len(s) == 0 { + return + } + switch s[0] { + default: + return + case '+': + if equalIgnoreCase(s, []byte("+inf")) || equalIgnoreCase(s, []byte("+infinity")) { + return math.Inf(1), true + } + case '-': + if equalIgnoreCase(s, []byte("-inf")) || equalIgnoreCase(s, []byte("-infinity")) { + return math.Inf(-1), true + } + case 'n', 'N': + if equalIgnoreCase(s, []byte("nan")) { + return math.NaN(), true + } + case 'i', 'I': + if equalIgnoreCase(s, []byte("inf")) || equalIgnoreCase(s, []byte("infinity")) { + return math.Inf(1), true + } + } + return +} + +func (b *decimal) set(s []byte) (ok bool) { + i := 0 + b.neg = false + b.trunc = false + + // optional sign + if i >= len(s) { + return + } + switch { + case s[i] == '+': + i++ + case s[i] == '-': + b.neg = true + i++ + } + + // digits + sawdot := false + sawdigits := false + for ; i < len(s); i++ { + switch { + case s[i] == '.': + if sawdot { + return + } + sawdot = true + b.dp = b.nd + continue + + case '0' <= s[i] && s[i] <= '9': + sawdigits = true + if s[i] == '0' && b.nd == 0 { // ignore leading zeros + b.dp-- + continue + } + if b.nd < len(b.d) { + b.d[b.nd] = s[i] + b.nd++ + } else if s[i] != '0' { + b.trunc = true + } + continue + } + break + } + if !sawdigits { + return + } + if !sawdot { + b.dp = b.nd + } + + // optional exponent moves decimal point. + // if we read a very large, very long number, + // just be sure to move the decimal point by + // a lot (say, 100000). it doesn't matter if it's + // not the exact number. + if i < len(s) && (s[i] == 'e' || s[i] == 'E') { + i++ + if i >= len(s) { + return + } + esign := 1 + if s[i] == '+' { + i++ + } else if s[i] == '-' { + i++ + esign = -1 + } + if i >= len(s) || s[i] < '0' || s[i] > '9' { + return + } + e := 0 + for ; i < len(s) && '0' <= s[i] && s[i] <= '9'; i++ { + if e < 10000 { + e = e*10 + int(s[i]) - '0' + } + } + b.dp += e * esign + } + + if i != len(s) { + return + } + + ok = true + return +} + +// readFloat reads a decimal mantissa and exponent from a float +// string representation. It sets ok to false if the number could +// not fit return types or is invalid. +func readFloat(s []byte) (mantissa uint64, exp int, neg, trunc, ok bool) { + const uint64digits = 19 + i := 0 + + // optional sign + if i >= len(s) { + return + } + switch { + case s[i] == '+': + i++ + case s[i] == '-': + neg = true + i++ + } + + // digits + sawdot := false + sawdigits := false + nd := 0 + ndMant := 0 + dp := 0 + for ; i < len(s); i++ { + switch c := s[i]; true { + case c == '.': + if sawdot { + return + } + sawdot = true + dp = nd + continue + + case '0' <= c && c <= '9': + sawdigits = true + if c == '0' && nd == 0 { // ignore leading zeros + dp-- + continue + } + nd++ + if ndMant < uint64digits { + mantissa *= 10 + mantissa += uint64(c - '0') + ndMant++ + } else if s[i] != '0' { + trunc = true + } + continue + } + break + } + if !sawdigits { + return + } + if !sawdot { + dp = nd + } + + // optional exponent moves decimal point. + // if we read a very large, very long number, + // just be sure to move the decimal point by + // a lot (say, 100000). 
it doesn't matter if it's + // not the exact number. + if i < len(s) && (s[i] == 'e' || s[i] == 'E') { + i++ + if i >= len(s) { + return + } + esign := 1 + if s[i] == '+' { + i++ + } else if s[i] == '-' { + i++ + esign = -1 + } + if i >= len(s) || s[i] < '0' || s[i] > '9' { + return + } + e := 0 + for ; i < len(s) && '0' <= s[i] && s[i] <= '9'; i++ { + if e < 10000 { + e = e*10 + int(s[i]) - '0' + } + } + dp += e * esign + } + + if i != len(s) { + return + } + + exp = dp - ndMant + ok = true + return + +} + +// decimal power of ten to binary power of two. +var powtab = []int{1, 3, 6, 9, 13, 16, 19, 23, 26} + +func (d *decimal) floatBits(flt *floatInfo) (b uint64, overflow bool) { + var exp int + var mant uint64 + + // Zero is always a special case. + if d.nd == 0 { + mant = 0 + exp = flt.bias + goto out + } + + // Obvious overflow/underflow. + // These bounds are for 64-bit floats. + // Will have to change if we want to support 80-bit floats in the future. + if d.dp > 310 { + goto overflow + } + if d.dp < -330 { + // zero + mant = 0 + exp = flt.bias + goto out + } + + // Scale by powers of two until in range [0.5, 1.0) + exp = 0 + for d.dp > 0 { + var n int + if d.dp >= len(powtab) { + n = 27 + } else { + n = powtab[d.dp] + } + d.Shift(-n) + exp += n + } + for d.dp < 0 || d.dp == 0 && d.d[0] < '5' { + var n int + if -d.dp >= len(powtab) { + n = 27 + } else { + n = powtab[-d.dp] + } + d.Shift(n) + exp -= n + } + + // Our range is [0.5,1) but floating point range is [1,2). + exp-- + + // Minimum representable exponent is flt.bias+1. + // If the exponent is smaller, move it up and + // adjust d accordingly. + if exp < flt.bias+1 { + n := flt.bias + 1 - exp + d.Shift(-n) + exp += n + } + + if exp-flt.bias >= 1<>= 1 + exp++ + if exp-flt.bias >= 1<>float64info.mantbits != 0 { + return + } + f = float64(mantissa) + if neg { + f = -f + } + switch { + case exp == 0: + // an integer. + return f, true + // Exact integers are <= 10^15. + // Exact powers of ten are <= 10^22. + case exp > 0 && exp <= 15+22: // int * 10^k + // If exponent is big but number of digits is not, + // can move a few zeros into the integer part. + if exp > 22 { + f *= float64pow10[exp-22] + exp = 22 + } + if f > 1e15 || f < -1e15 { + // the exponent was really too large. + return + } + return f * float64pow10[exp], true + case exp < 0 && exp >= -22: // int / 10^k + return f / float64pow10[-exp], true + } + return +} + +// If possible to compute mantissa*10^exp to 32-bit float f exactly, +// entirely in floating-point math, do so, avoiding the machinery above. +func atof32exact(mantissa uint64, exp int, neg bool) (f float32, ok bool) { + if mantissa>>float32info.mantbits != 0 { + return + } + f = float32(mantissa) + if neg { + f = -f + } + switch { + case exp == 0: + return f, true + // Exact integers are <= 10^7. + // Exact powers of ten are <= 10^10. + case exp > 0 && exp <= 7+10: // int * 10^k + // If exponent is big but number of digits is not, + // can move a few zeros into the integer part. + if exp > 10 { + f *= float32pow10[exp-10] + exp = 10 + } + if f > 1e7 || f < -1e7 { + // the exponent was really too large. + return + } + return f * float32pow10[exp], true + case exp < 0 && exp >= -10: // int / 10^k + return f / float32pow10[-exp], true + } + return +} + +const fnParseFloat = "ParseFloat" + +func atof32(s []byte) (f float32, err error) { + if val, ok := special(s); ok { + return float32(val), nil + } + + if optimize { + // Parse mantissa and exponent. 
+ mantissa, exp, neg, trunc, ok := readFloat(s) + if ok { + // Try pure floating-point arithmetic conversion. + if !trunc { + if f, ok := atof32exact(mantissa, exp, neg); ok { + return f, nil + } + } + // Try another fast path. + ext := new(extFloat) + if ok := ext.AssignDecimal(mantissa, exp, neg, trunc, &float32info); ok { + b, ovf := ext.floatBits(&float32info) + f = math.Float32frombits(uint32(b)) + if ovf { + err = rangeError(fnParseFloat, string(s)) + } + return f, err + } + } + } + var d decimal + if !d.set(s) { + return 0, syntaxError(fnParseFloat, string(s)) + } + b, ovf := d.floatBits(&float32info) + f = math.Float32frombits(uint32(b)) + if ovf { + err = rangeError(fnParseFloat, string(s)) + } + return f, err +} + +func atof64(s []byte) (f float64, err error) { + if val, ok := special(s); ok { + return val, nil + } + + if optimize { + // Parse mantissa and exponent. + mantissa, exp, neg, trunc, ok := readFloat(s) + if ok { + // Try pure floating-point arithmetic conversion. + if !trunc { + if f, ok := atof64exact(mantissa, exp, neg); ok { + return f, nil + } + } + // Try another fast path. + ext := new(extFloat) + if ok := ext.AssignDecimal(mantissa, exp, neg, trunc, &float64info); ok { + b, ovf := ext.floatBits(&float64info) + f = math.Float64frombits(b) + if ovf { + err = rangeError(fnParseFloat, string(s)) + } + return f, err + } + } + } + var d decimal + if !d.set(s) { + return 0, syntaxError(fnParseFloat, string(s)) + } + b, ovf := d.floatBits(&float64info) + f = math.Float64frombits(b) + if ovf { + err = rangeError(fnParseFloat, string(s)) + } + return f, err +} + +// ParseFloat converts the string s to a floating-point number +// with the precision specified by bitSize: 32 for float32, or 64 for float64. +// When bitSize=32, the result still has type float64, but it will be +// convertible to float32 without changing its value. +// +// If s is well-formed and near a valid floating point number, +// ParseFloat returns the nearest floating point number rounded +// using IEEE754 unbiased rounding. +// +// The errors that ParseFloat returns have concrete type *NumError +// and include err.Num = s. +// +// If s is not syntactically well-formed, ParseFloat returns err.Err = ErrSyntax. +// +// If s is syntactically well-formed but is more than 1/2 ULP +// away from the largest floating point number of the given size, +// ParseFloat returns f = ±Inf, err.Err = ErrRange. +func ParseFloat(s []byte, bitSize int) (f float64, err error) { + if bitSize == 32 { + f1, err1 := atof32(s) + return float64(f1), err1 + } + f1, err1 := atof64(s) + return f1, err1 +} + +// oroginal: strconv/decimal.go, but not exported, and needed for PareFloat. + +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Multiprecision decimal numbers. +// For floating-point formatting only; not general purpose. +// Only operations are assign and (binary) left/right shift. +// Can do binary floating point in multiprecision decimal precisely +// because 2 divides 10; cannot do decimal floating point +// in multiprecision binary precisely. 
+type decimal struct {
+	d     [800]byte // digits
+	nd    int       // number of digits used
+	dp    int       // decimal point
+	neg   bool
+	trunc bool // discarded nonzero digits beyond d[:nd]
+}
+
+func (a *decimal) String() string {
+	n := 10 + a.nd
+	if a.dp > 0 {
+		n += a.dp
+	}
+	if a.dp < 0 {
+		n += -a.dp
+	}
+
+	buf := make([]byte, n)
+	w := 0
+	switch {
+	case a.nd == 0:
+		return "0"
+
+	case a.dp <= 0:
+		// zeros fill space between decimal point and digits
+		buf[w] = '0'
+		w++
+		buf[w] = '.'
+		w++
+		w += digitZero(buf[w : w+-a.dp])
+		w += copy(buf[w:], a.d[0:a.nd])
+
+	case a.dp < a.nd:
+		// decimal point in middle of digits
+		w += copy(buf[w:], a.d[0:a.dp])
+		buf[w] = '.'
+		w++
+		w += copy(buf[w:], a.d[a.dp:a.nd])
+
+	default:
+		// zeros fill space between digits and decimal point
+		w += copy(buf[w:], a.d[0:a.nd])
+		w += digitZero(buf[w : w+a.dp-a.nd])
+	}
+	return string(buf[0:w])
+}
+
+func digitZero(dst []byte) int {
+	for i := range dst {
+		dst[i] = '0'
+	}
+	return len(dst)
+}
+
+// trim trailing zeros from number.
+// (They are meaningless; the decimal point is tracked
+// independent of the number of digits.)
+func trim(a *decimal) {
+	for a.nd > 0 && a.d[a.nd-1] == '0' {
+		a.nd--
+	}
+	if a.nd == 0 {
+		a.dp = 0
+	}
+}
+
+// Assign v to a.
+func (a *decimal) Assign(v uint64) {
+	var buf [24]byte
+
+	// Write reversed decimal in buf.
+	n := 0
+	for v > 0 {
+		v1 := v / 10
+		v -= 10 * v1
+		buf[n] = byte(v + '0')
+		n++
+		v = v1
+	}
+
+	// Reverse again to produce forward decimal in a.d.
+	a.nd = 0
+	for n--; n >= 0; n-- {
+		a.d[a.nd] = buf[n]
+		a.nd++
+	}
+	a.dp = a.nd
+	trim(a)
+}
+
+// Maximum shift that we can do in one pass without overflow.
+// Signed int has 31 bits, and we have to be able to accommodate 9<<k.
+const maxShift = 27
+
+// Binary shift right (/ 2) by k bits.  k <= maxShift to avoid overflow.
+func rightShift(a *decimal, k uint) {
+	r := 0 // read pointer
+	w := 0 // write pointer
+
+	// Pick up enough leading digits to cover first shift.
+	var n int
+	for ; n>>k == 0; r++ {
+		if r >= a.nd {
+			if n == 0 {
+				// a == 0; shouldn't get here, but handle anyway.
+				a.nd = 0
+				return
+			}
+			for n>>k == 0 {
+				n = n * 10
+				r++
+			}
+			break
+		}
+		c := int(a.d[r])
+		n = n*10 + c - '0'
+	}
+	a.dp -= r - 1
+
+	// Pick up a digit, put down a digit.
+	for ; r < a.nd; r++ {
+		c := int(a.d[r])
+		dig := n >> k
+		n -= dig << k
+		a.d[w] = byte(dig + '0')
+		w++
+		n = n*10 + c - '0'
+	}
+
+	// Put down extra digits.
+	for n > 0 {
+		dig := n >> k
+		n -= dig << k
+		if w < len(a.d) {
+			a.d[w] = byte(dig + '0')
+			w++
+		} else if dig > 0 {
+			a.trunc = true
+		}
+		n = n * 10
+	}
+
+	a.nd = w
+	trim(a)
+}
+
+// Cheat sheet for left shift: table indexed by shift count giving
+// number of new digits that will be introduced by that shift.
+//
+// For example, leftcheats[4] = {2, "625"}. That means that
+// if we are shifting by 4 (multiplying by 16), it will add 2 digits
+// when the string prefix is "625" through "999", and one fewer digit
+// if the string prefix is "000" through "624".
+//
+// Credit for this trick goes to Ken.
+
+type leftCheat struct {
+	delta  int    // number of new digits
+	cutoff string // minus one digit if original < a.
+}
+
+var leftcheats = []leftCheat{
+	// Leading digits of 1/2^i = 5^i.
+	// 5^23 is not an exact 64-bit floating point number,
+	// so have to use bc for the math.
+// Maximum shift that we can do in one pass without overflow.
+// Signed int has 31 bits, and we have to be able to accommodate 9<<k.
+const maxShift = 27
+
+// Binary shift right (/ 2) by k bits. k <= maxShift to avoid overflow.
+func rightShift(a *decimal, k uint) {
+	r := 0 // read pointer
+	w := 0 // write pointer
+
+	// Pick up enough leading digits to cover first shift.
+	n := 0
+	for ; n>>k == 0; r++ {
+		if r >= a.nd {
+			if n == 0 {
+				// a == 0; shouldn't get here, but handle anyway.
+				a.nd = 0
+				return
+			}
+			for n>>k == 0 {
+				n = n * 10
+				r++
+			}
+			break
+		}
+		c := int(a.d[r])
+		n = n*10 + c - '0'
+	}
+	a.dp -= r - 1
+
+	// Pick up a digit, put down a digit.
+	for ; r < a.nd; r++ {
+		c := int(a.d[r])
+		dig := n >> k
+		n -= dig << k
+		a.d[w] = byte(dig + '0')
+		w++
+		n = n*10 + c - '0'
+	}
+
+	// Put down extra digits.
+	for n > 0 {
+		dig := n >> k
+		n -= dig << k
+		if w < len(a.d) {
+			a.d[w] = byte(dig + '0')
+			w++
+		} else if dig > 0 {
+			a.trunc = true
+		}
+		n = n * 10
+	}
+
+	a.nd = w
+	trim(a)
+}
+
+// Cheat sheet for left shift: table indexed by shift count giving
+// number of new digits that will be introduced by that shift.
+//
+// For example, leftcheats[4] = {2, "625"}. That means that
+// if we are shifting by 4 (multiplying by 16), it will add 2 digits
+// when the string prefix is "625" through "999", and one fewer digit
+// if the string prefix is "000" through "624".
+//
+// Credit for this trick goes to Ken.
+
+type leftCheat struct {
+	delta  int    // number of new digits
+	cutoff string // minus one digit if original < a.
+}
+
+var leftcheats = []leftCheat{
+	// Leading digits of 1/2^i = 5^i.
+	// 5^23 is not an exact 64-bit floating point number,
+	// so have to use bc for the math.
+	/*
+		seq 27 | sed 's/^/5^/' | bc |
+		awk 'BEGIN{ print "\tleftCheat{ 0, \"\" }," }
+		{
+			log2 = log(2)/log(10)
+			printf("\tleftCheat{ %d, \"%s\" },\t// * %d\n",
+				int(log2*NR+1), $0, 2**NR)
+		}'
+	*/
+	{0, ""},
+	{1, "5"},                   // * 2
+	{1, "25"},                  // * 4
+	{1, "125"},                 // * 8
+	{2, "625"},                 // * 16
+	{2, "3125"},                // * 32
+	{2, "15625"},               // * 64
+	{3, "78125"},               // * 128
+	{3, "390625"},              // * 256
+	{3, "1953125"},             // * 512
+	{4, "9765625"},             // * 1024
+	{4, "48828125"},            // * 2048
+	{4, "244140625"},           // * 4096
+	{4, "1220703125"},          // * 8192
+	{5, "6103515625"},          // * 16384
+	{5, "30517578125"},         // * 32768
+	{5, "152587890625"},        // * 65536
+	{6, "762939453125"},        // * 131072
+	{6, "3814697265625"},       // * 262144
+	{6, "19073486328125"},      // * 524288
+	{7, "95367431640625"},      // * 1048576
+	{7, "476837158203125"},     // * 2097152
+	{7, "2384185791015625"},    // * 4194304
+	{7, "11920928955078125"},   // * 8388608
+	{8, "59604644775390625"},   // * 16777216
+	{8, "298023223876953125"},  // * 33554432
+	{8, "1490116119384765625"}, // * 67108864
+	{9, "7450580596923828125"}, // * 134217728
+}
+
+// Is the leading prefix of b lexicographically less than s?
+func prefixIsLessThan(b []byte, s string) bool {
+	for i := 0; i < len(s); i++ {
+		if i >= len(b) {
+			return true
+		}
+		if b[i] != s[i] {
+			return b[i] < s[i]
+		}
+	}
+	return false
+}
+
+// Binary shift left (* 2) by k bits. k <= maxShift to avoid overflow.
+func leftShift(a *decimal, k uint) {
+	delta := leftcheats[k].delta
+	if prefixIsLessThan(a.d[0:a.nd], leftcheats[k].cutoff) {
+		delta--
+	}
+
+	r := a.nd         // read index
+	w := a.nd + delta // write index
+	n := 0
+
+	// Pick up a digit, put down a digit.
+	for r--; r >= 0; r-- {
+		n += (int(a.d[r]) - '0') << k
+		quo := n / 10
+		rem := n - 10*quo
+		w--
+		if w < len(a.d) {
+			a.d[w] = byte(rem + '0')
+		} else if rem != 0 {
+			a.trunc = true
+		}
+		n = quo
+	}
+
+	// Put down extra digits.
+	for n > 0 {
+		quo := n / 10
+		rem := n - 10*quo
+		w--
+		if w < len(a.d) {
+			a.d[w] = byte(rem + '0')
+		} else if rem != 0 {
+			a.trunc = true
+		}
+		n = quo
+	}
+
+	a.nd += delta
+	if a.nd >= len(a.d) {
+		a.nd = len(a.d)
+	}
+	a.dp += delta
+	trim(a)
+}
+
+// Binary shift left (k > 0) or right (k < 0).
+func (a *decimal) Shift(k int) {
+	switch {
+	case a.nd == 0:
+		// nothing to do: a == 0
+	case k > 0:
+		for k > maxShift {
+			leftShift(a, maxShift)
+			k -= maxShift
+		}
+		leftShift(a, uint(k))
+	case k < 0:
+		for k < -maxShift {
+			rightShift(a, maxShift)
+			k += maxShift
+		}
+		rightShift(a, uint(-k))
+	}
+}
+
+// If we chop a at nd digits, should we round up?
+func shouldRoundUp(a *decimal, nd int) bool {
+	if nd < 0 || nd >= a.nd {
+		return false
+	}
+	if a.d[nd] == '5' && nd+1 == a.nd { // exactly halfway - round to even
+		// if we truncated, a little higher than what's recorded - always round up
+		if a.trunc {
+			return true
+		}
+		return nd > 0 && (a.d[nd-1]-'0')%2 != 0
+	}
+	// not halfway - digit tells all
+	return a.d[nd] >= '5'
+}
+
+// Round a to nd digits (or fewer).
+// If nd is zero, it means we're rounding
+// just to the left of the digits, as in
+// 0.09 -> 0.1.
+func (a *decimal) Round(nd int) {
+	if nd < 0 || nd >= a.nd {
+		return
+	}
+	if shouldRoundUp(a, nd) {
+		a.RoundUp(nd)
+	} else {
+		a.RoundDown(nd)
+	}
+}
+
+// Round a down to nd digits (or fewer).
+func (a *decimal) RoundDown(nd int) {
+	if nd < 0 || nd >= a.nd {
+		return
+	}
+	a.nd = nd
+	trim(a)
+}
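shouldRoundUp and Round above implement round-half-to-even. Here is a toy re-implementation of just the halfway rule, for intuition; it deliberately ignores the all-nines carry and decimal-point bump that the real RoundUp (below) performs:

```go
package main

import "fmt"

// roundHalfEven cuts a digit string at nd digits; an exactly-halfway
// value rounds so the last kept digit is even, as in shouldRoundUp.
func roundHalfEven(digits string, nd int) string {
	d := []byte(digits[:nd])
	roundUp := false
	switch {
	case digits[nd] > '5':
		roundUp = true
	case digits[nd] == '5' && nd+1 < len(digits):
		roundUp = true // nonzero digits beyond the half: round up
	case digits[nd] == '5':
		roundUp = (d[nd-1]-'0')%2 != 0 // exactly halfway: round to even
	}
	if roundUp {
		for i := nd - 1; i >= 0; i-- {
			if d[i] < '9' {
				d[i]++
				break
			}
			d[i] = '0' // simplified carry; real code also bumps dp
		}
	}
	return string(d)
}

func main() {
	fmt.Println(roundHalfEven("125", 2))  // "12": halfway, '2' already even
	fmt.Println(roundHalfEven("135", 2))  // "14": halfway, '3' is odd
	fmt.Println(roundHalfEven("1251", 2)) // "13": beyond halfway
}
```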
+// Round a up to nd digits (or fewer).
+func (a *decimal) RoundUp(nd int) {
+	if nd < 0 || nd >= a.nd {
+		return
+	}
+
+	// round up
+	for i := nd - 1; i >= 0; i-- {
+		c := a.d[i]
+		if c < '9' { // can stop after this digit
+			a.d[i]++
+			a.nd = i + 1
+			return
+		}
+	}
+
+	// Number is all 9s.
+	// Change to single 1 with adjusted decimal point.
+	a.d[0] = '1'
+	a.nd = 1
+	a.dp++
+}
+
+// Extract integer part, rounded appropriately.
+// No guarantees about overflow.
+func (a *decimal) RoundedInteger() uint64 {
+	if a.dp > 20 {
+		return 0xFFFFFFFFFFFFFFFF
+	}
+	var i int
+	n := uint64(0)
+	for i = 0; i < a.dp && i < a.nd; i++ {
+		n = n*10 + uint64(a.d[i]-'0')
+	}
+	for ; i < a.dp; i++ {
+		n *= 10
+	}
+	if shouldRoundUp(a, a.dp) {
+		n++
+	}
+	return n
+}
diff --git a/vendor/github.com/pquerna/ffjson/fflib/v1/internal/atoi.go b/vendor/github.com/pquerna/ffjson/fflib/v1/internal/atoi.go
new file mode 100644
index 0000000..06eb2ec
--- /dev/null
+++ b/vendor/github.com/pquerna/ffjson/fflib/v1/internal/atoi.go
@@ -0,0 +1,213 @@
+/**
+ * Copyright 2014 Paul Querna
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+/* Portions of this file are based on Go stdlib's strconv/atoi.go */
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package internal
+
+import (
+	"errors"
+	"strconv"
+)
+
+// ErrRange indicates that a value is out of range for the target type.
+var ErrRange = errors.New("value out of range")
+
+// ErrSyntax indicates that a value does not have the right syntax for the target type.
+var ErrSyntax = errors.New("invalid syntax")
+
+// A NumError records a failed conversion.
+type NumError struct {
+	Func string // the failing function (ParseBool, ParseInt, ParseUint, ParseFloat)
+	Num  string // the input
+	Err  error  // the reason the conversion failed (ErrRange, ErrSyntax)
+}
+
+func (e *NumError) Error() string {
+	return "strconv." + e.Func + ": " + "parsing " + strconv.Quote(e.Num) + ": " + e.Err.Error()
+}
+
+func syntaxError(fn, str string) *NumError {
+	return &NumError{fn, str, ErrSyntax}
+}
+
+func rangeError(fn, str string) *NumError {
+	return &NumError{fn, str, ErrRange}
+}
+
+const intSize = 32 << uint(^uint(0)>>63)
+
+// IntSize is the size in bits of an int or uint value.
+const IntSize = intSize
+
+// Return the first number n such that n*base >= 1<<64.
+func cutoff64(base int) uint64 {
+	if base < 2 {
+		return 0
+	}
+	return (1<<64-1)/uint64(base) + 1
+}
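cutoff64 exists so the digit loop in ParseUint below can detect overflow before it happens; a quick check of the base-10 threshold:

```go
package main

import "fmt"

func main() {
	// cutoff64(10) = (2^64-1)/10 + 1 is the smallest value whose next
	// *10 step can overflow; ParseUint reports ErrRange once the
	// accumulator reaches it.
	const cutoff uint64 = (1<<64-1)/10 + 1
	below, at := cutoff-1, cutoff
	fmt.Println(below * 10)   // 18446744073709551610: still fits a uint64
	fmt.Println(at*10 < at)   // true: wrapped around, i.e. overflow
}
```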
+// ParseUint is like ParseInt but for unsigned numbers, and operating on []byte.
+func ParseUint(s []byte, base int, bitSize int) (n uint64, err error) {
+	var cutoff, maxVal uint64
+
+	if bitSize == 0 {
+		bitSize = int(IntSize)
+	}
+
+	s0 := s
+	switch {
+	case len(s) < 1:
+		err = ErrSyntax
+		goto Error
+
+	case 2 <= base && base <= 36:
+		// valid base; nothing to do
+
+	case base == 0:
+		// Look for octal, hex prefix.
+		switch {
+		case s[0] == '0' && len(s) > 1 && (s[1] == 'x' || s[1] == 'X'):
+			base = 16
+			s = s[2:]
+			if len(s) < 1 {
+				err = ErrSyntax
+				goto Error
+			}
+		case s[0] == '0':
+			base = 8
+		default:
+			base = 10
+		}
+
+	default:
+		err = errors.New("invalid base " + strconv.Itoa(base))
+		goto Error
+	}
+
+	n = 0
+	cutoff = cutoff64(base)
+	maxVal = 1<<uint(bitSize) - 1
+
+	for i := 0; i < len(s); i++ {
+		var v byte
+		d := s[i]
+		switch {
+		case '0' <= d && d <= '9':
+			v = d - '0'
+		case 'a' <= d && d <= 'z':
+			v = d - 'a' + 10
+		case 'A' <= d && d <= 'Z':
+			v = d - 'A' + 10
+		default:
+			n = 0
+			err = ErrSyntax
+			goto Error
+		}
+		if int(v) >= base {
+			n = 0
+			err = ErrSyntax
+			goto Error
+		}
+
+		if n >= cutoff {
+			// n*base overflows
+			n = 1<<64 - 1
+			err = ErrRange
+			goto Error
+		}
+		n *= uint64(base)
+
+		n1 := n + uint64(v)
+		if n1 < n || n1 > maxVal {
+			// n+v overflows
+			n = 1<<64 - 1
+			err = ErrRange
+			goto Error
+		}
+		n = n1
+	}
+
+	return n, nil
+
+Error:
+	return n, &NumError{"ParseUint", string(s0), err}
+}
+
+// ParseInt interprets a string s in the given base (2 to 36) and
+// returns the corresponding value i. If base == 0, the base is
+// implied by the string's prefix: base 16 for "0x", base 8 for
+// "0", and base 10 otherwise.
+//
+// The bitSize argument specifies the integer type
+// that the result must fit into. Bit sizes 0, 8, 16, 32, and 64
+// correspond to int, int8, int16, int32, and int64.
+//
+// The errors that ParseInt returns have concrete type *NumError
+// and include err.Num = s. If s is empty or contains invalid
+// digits, err.Err = ErrSyntax and the returned value is 0;
+// if the value corresponding to s cannot be represented by a
+// signed integer of the given size, err.Err = ErrRange and the
+// returned value is the maximum magnitude integer of the
+// appropriate bitSize and sign.
+func ParseInt(s []byte, base int, bitSize int) (i int64, err error) {
+	const fnParseInt = "ParseInt"
+
+	if bitSize == 0 {
+		bitSize = int(IntSize)
+	}
+
+	// Empty string bad.
+	if len(s) == 0 {
+		return 0, syntaxError(fnParseInt, string(s))
+	}
+
+	// Pick off leading sign.
+	s0 := s
+	neg := false
+	if s[0] == '+' {
+		s = s[1:]
+	} else if s[0] == '-' {
+		neg = true
+		s = s[1:]
+	}
+
+	// Convert unsigned and check range.
+	var un uint64
+	un, err = ParseUint(s, base, bitSize)
+	if err != nil && err.(*NumError).Err != ErrRange {
+		err.(*NumError).Func = fnParseInt
+		err.(*NumError).Num = string(s0)
+		return 0, err
+	}
+	cutoff := uint64(1 << uint(bitSize-1))
+	if !neg && un >= cutoff {
+		return int64(cutoff - 1), rangeError(fnParseInt, string(s0))
+	}
+	if neg && un > cutoff {
+		return -int64(cutoff), rangeError(fnParseInt, string(s0))
+	}
+	n := int64(un)
+	if neg {
+		n = -n
+	}
+	return n, nil
+}
diff --git a/vendor/github.com/pquerna/ffjson/fflib/v1/internal/extfloat.go b/vendor/github.com/pquerna/ffjson/fflib/v1/internal/extfloat.go
new file mode 100644
index 0000000..ab79108
--- /dev/null
+++ b/vendor/github.com/pquerna/ffjson/fflib/v1/internal/extfloat.go
@@ -0,0 +1,668 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package internal
+
+// An extFloat represents an extended floating-point number, with more
+// precision than a float64. It does not try to save bits: the
+// number represented by the structure is mant*(2^exp), with a negative
+// sign if neg is true.
+type extFloat struct {
+	mant uint64
+	exp  int
+	neg  bool
+}
+
+// Powers of ten taken from double-conversion library.
+// http://code.google.com/p/double-conversion/ +const ( + firstPowerOfTen = -348 + stepPowerOfTen = 8 +) + +var smallPowersOfTen = [...]extFloat{ + {1 << 63, -63, false}, // 1 + {0xa << 60, -60, false}, // 1e1 + {0x64 << 57, -57, false}, // 1e2 + {0x3e8 << 54, -54, false}, // 1e3 + {0x2710 << 50, -50, false}, // 1e4 + {0x186a0 << 47, -47, false}, // 1e5 + {0xf4240 << 44, -44, false}, // 1e6 + {0x989680 << 40, -40, false}, // 1e7 +} + +var powersOfTen = [...]extFloat{ + {0xfa8fd5a0081c0288, -1220, false}, // 10^-348 + {0xbaaee17fa23ebf76, -1193, false}, // 10^-340 + {0x8b16fb203055ac76, -1166, false}, // 10^-332 + {0xcf42894a5dce35ea, -1140, false}, // 10^-324 + {0x9a6bb0aa55653b2d, -1113, false}, // 10^-316 + {0xe61acf033d1a45df, -1087, false}, // 10^-308 + {0xab70fe17c79ac6ca, -1060, false}, // 10^-300 + {0xff77b1fcbebcdc4f, -1034, false}, // 10^-292 + {0xbe5691ef416bd60c, -1007, false}, // 10^-284 + {0x8dd01fad907ffc3c, -980, false}, // 10^-276 + {0xd3515c2831559a83, -954, false}, // 10^-268 + {0x9d71ac8fada6c9b5, -927, false}, // 10^-260 + {0xea9c227723ee8bcb, -901, false}, // 10^-252 + {0xaecc49914078536d, -874, false}, // 10^-244 + {0x823c12795db6ce57, -847, false}, // 10^-236 + {0xc21094364dfb5637, -821, false}, // 10^-228 + {0x9096ea6f3848984f, -794, false}, // 10^-220 + {0xd77485cb25823ac7, -768, false}, // 10^-212 + {0xa086cfcd97bf97f4, -741, false}, // 10^-204 + {0xef340a98172aace5, -715, false}, // 10^-196 + {0xb23867fb2a35b28e, -688, false}, // 10^-188 + {0x84c8d4dfd2c63f3b, -661, false}, // 10^-180 + {0xc5dd44271ad3cdba, -635, false}, // 10^-172 + {0x936b9fcebb25c996, -608, false}, // 10^-164 + {0xdbac6c247d62a584, -582, false}, // 10^-156 + {0xa3ab66580d5fdaf6, -555, false}, // 10^-148 + {0xf3e2f893dec3f126, -529, false}, // 10^-140 + {0xb5b5ada8aaff80b8, -502, false}, // 10^-132 + {0x87625f056c7c4a8b, -475, false}, // 10^-124 + {0xc9bcff6034c13053, -449, false}, // 10^-116 + {0x964e858c91ba2655, -422, false}, // 10^-108 + {0xdff9772470297ebd, -396, false}, // 10^-100 + {0xa6dfbd9fb8e5b88f, -369, false}, // 10^-92 + {0xf8a95fcf88747d94, -343, false}, // 10^-84 + {0xb94470938fa89bcf, -316, false}, // 10^-76 + {0x8a08f0f8bf0f156b, -289, false}, // 10^-68 + {0xcdb02555653131b6, -263, false}, // 10^-60 + {0x993fe2c6d07b7fac, -236, false}, // 10^-52 + {0xe45c10c42a2b3b06, -210, false}, // 10^-44 + {0xaa242499697392d3, -183, false}, // 10^-36 + {0xfd87b5f28300ca0e, -157, false}, // 10^-28 + {0xbce5086492111aeb, -130, false}, // 10^-20 + {0x8cbccc096f5088cc, -103, false}, // 10^-12 + {0xd1b71758e219652c, -77, false}, // 10^-4 + {0x9c40000000000000, -50, false}, // 10^4 + {0xe8d4a51000000000, -24, false}, // 10^12 + {0xad78ebc5ac620000, 3, false}, // 10^20 + {0x813f3978f8940984, 30, false}, // 10^28 + {0xc097ce7bc90715b3, 56, false}, // 10^36 + {0x8f7e32ce7bea5c70, 83, false}, // 10^44 + {0xd5d238a4abe98068, 109, false}, // 10^52 + {0x9f4f2726179a2245, 136, false}, // 10^60 + {0xed63a231d4c4fb27, 162, false}, // 10^68 + {0xb0de65388cc8ada8, 189, false}, // 10^76 + {0x83c7088e1aab65db, 216, false}, // 10^84 + {0xc45d1df942711d9a, 242, false}, // 10^92 + {0x924d692ca61be758, 269, false}, // 10^100 + {0xda01ee641a708dea, 295, false}, // 10^108 + {0xa26da3999aef774a, 322, false}, // 10^116 + {0xf209787bb47d6b85, 348, false}, // 10^124 + {0xb454e4a179dd1877, 375, false}, // 10^132 + {0x865b86925b9bc5c2, 402, false}, // 10^140 + {0xc83553c5c8965d3d, 428, false}, // 10^148 + {0x952ab45cfa97a0b3, 455, false}, // 10^156 + {0xde469fbd99a05fe3, 481, false}, // 10^164 + {0xa59bc234db398c25, 
508, false}, // 10^172
+	{0xf6c69a72a3989f5c, 534, false},  // 10^180
+	{0xb7dcbf5354e9bece, 561, false},  // 10^188
+	{0x88fcf317f22241e2, 588, false},  // 10^196
+	{0xcc20ce9bd35c78a5, 614, false},  // 10^204
+	{0x98165af37b2153df, 641, false},  // 10^212
+	{0xe2a0b5dc971f303a, 667, false},  // 10^220
+	{0xa8d9d1535ce3b396, 694, false},  // 10^228
+	{0xfb9b7cd9a4a7443c, 720, false},  // 10^236
+	{0xbb764c4ca7a44410, 747, false},  // 10^244
+	{0x8bab8eefb6409c1a, 774, false},  // 10^252
+	{0xd01fef10a657842c, 800, false},  // 10^260
+	{0x9b10a4e5e9913129, 827, false},  // 10^268
+	{0xe7109bfba19c0c9d, 853, false},  // 10^276
+	{0xac2820d9623bf429, 880, false},  // 10^284
+	{0x80444b5e7aa7cf85, 907, false},  // 10^292
+	{0xbf21e44003acdd2d, 933, false},  // 10^300
+	{0x8e679c2f5e44ff8f, 960, false},  // 10^308
+	{0xd433179d9c8cb841, 986, false},  // 10^316
+	{0x9e19db92b4e31ba9, 1013, false}, // 10^324
+	{0xeb96bf6ebadf77d9, 1039, false}, // 10^332
+	{0xaf87023b9bf0ee6b, 1066, false}, // 10^340
+}
+
+// floatBits returns the bits of the float64 that best approximates
+// the extFloat passed as receiver. Overflow is set to true if
+// the resulting float64 is ±Inf.
+func (f *extFloat) floatBits(flt *floatInfo) (bits uint64, overflow bool) {
+	f.Normalize()
+
+	exp := f.exp + 63
+
+	// Exponent too small.
+	if exp < flt.bias+1 {
+		n := flt.bias + 1 - exp
+		f.mant >>= uint(n)
+		exp += n
+	}
+
+	// Extract 1+flt.mantbits bits from the 64-bit mantissa.
+	mant := f.mant >> (63 - flt.mantbits)
+	if f.mant&(1<<(62-flt.mantbits)) != 0 {
+		// Round up.
+		mant += 1
+	}
+
+	// Rounding might have added a bit; shift down.
+	if mant == 2<<flt.mantbits {
+		mant >>= 1
+		exp++
+	}
+
+	// Infinities.
+	if exp-flt.bias >= 1<<flt.expbits-1 {
+		goto overflow
+	}
+
+	// Denormalized?
+	if mant&(1<<flt.mantbits) == 0 {
+		exp = flt.bias
+	}
+	goto out
+
+overflow:
+	// ±Inf
+	mant = 0
+	exp = 1<<flt.expbits - 1 + flt.bias
+	overflow = true
+
+out:
+	// Assemble bits.
+	bits = mant & (uint64(1)<<flt.mantbits - 1)
+	bits |= uint64((exp-flt.bias)&(1<<flt.expbits-1)) << flt.mantbits
+	if f.neg {
+		bits |= 1 << (flt.mantbits + flt.expbits)
+	}
+	return bits, overflow
+}
+
+// AssignComputeBounds sets f to the floating point value
+// defined by mant, exp and precision given by flt. It returns
+// lower, upper such that any number in the closed interval
+// [lower, upper] is converted back to the same floating point number.
+func (f *extFloat) AssignComputeBounds(mant uint64, exp int, neg bool, flt *floatInfo) (lower, upper extFloat) {
+	f.mant = mant
+	f.exp = exp - int(flt.mantbits)
+	f.neg = neg
+	if f.exp <= 0 && mant == (mant>>uint(-f.exp))<<uint(-f.exp) {
+		// An exact integer.
+		f.mant >>= uint(-f.exp)
+		f.exp = 0
+		return *f, *f
+	}
+	expBiased := exp - flt.bias
+
+	upper = extFloat{mant: 2*f.mant + 1, exp: f.exp - 1, neg: f.neg}
+	if mant != 1<<flt.mantbits || expBiased == 1 {
+		lower = extFloat{mant: 2*f.mant - 1, exp: f.exp - 1, neg: f.neg}
+	} else {
+		lower = extFloat{mant: 4*f.mant - 1, exp: f.exp - 2, neg: f.neg}
+	}
+	return
+}
+
+// Normalize normalizes f so that the highest bit of the mantissa is
+// set, and returns the number by which the mantissa was left-shifted.
+func (f *extFloat) Normalize() (shift uint) {
+	mant, exp := f.mant, f.exp
+	if mant == 0 {
+		return 0
+	}
+	if mant>>(64-32) == 0 {
+		mant <<= 32
+		exp -= 32
+	}
+	if mant>>(64-16) == 0 {
+		mant <<= 16
+		exp -= 16
+	}
+	if mant>>(64-8) == 0 {
+		mant <<= 8
+		exp -= 8
+	}
+	if mant>>(64-4) == 0 {
+		mant <<= 4
+		exp -= 4
+	}
+	if mant>>(64-2) == 0 {
+		mant <<= 2
+		exp -= 2
+	}
+	if mant>>(64-1) == 0 {
+		mant <<= 1
+		exp -= 1
+	}
+	shift = uint(f.exp - exp)
+	f.mant, f.exp = mant, exp
+	return
+}
+
+// Multiply sets f to the product f*g: the result is correctly rounded,
+// but not normalized.
+func (f *extFloat) Multiply(g extFloat) {
+	fhi, flo := f.mant>>32, uint64(uint32(f.mant))
+	ghi, glo := g.mant>>32, uint64(uint32(g.mant))
+
+	// Cross products.
+	cross1 := fhi * glo
+	cross2 := flo * ghi
+
+	// f.mant*g.mant is fhi*ghi << 64 + (cross1+cross2) << 32 + flo*glo
+	f.mant = fhi*ghi + (cross1 >> 32) + (cross2 >> 32)
+	rem := uint64(uint32(cross1)) + uint64(uint32(cross2)) + ((flo * glo) >> 32)
+	// Round up.
+	rem += (1 << 31)
+
+	f.mant += (rem >> 32)
+	f.exp = f.exp + g.exp + 64
+}
+
+var uint64pow10 = [...]uint64{
+	1, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9,
+	1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19,
+}
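An extFloat is simply mant·2^exp carried at full 64-bit width. For intuition, here is a sketch that decomposes a normalized float64 into that form by hand (denormals and zero are deliberately not handled):

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	// Decompose 6.25 into an integer mantissa and a binary exponent,
	// the same shape extFloat carries.
	bits := math.Float64bits(6.25)
	mant := bits&(1<<52-1) | 1<<52                // restore implicit leading bit
	exp := int(bits>>52&0x7ff) - 1023 - 52        // unbias, account for integer mantissa
	fmt.Println(mant, exp)                        // 7036874417766400 -50
	fmt.Println(float64(mant) * math.Pow(2, float64(exp))) // 6.25
}
```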
+// AssignDecimal sets f to an approximate value mantissa*10^exp. It
+// returns true if the value represented by f is guaranteed to be the
+// best approximation of d after being rounded to a float64 or
+// float32 depending on flt.
+func (f *extFloat) AssignDecimal(mantissa uint64, exp10 int, neg bool, trunc bool, flt *floatInfo) (ok bool) {
+	const uint64digits = 19
+	const errorscale = 8
+	errors := 0 // An upper bound for error, computed in errorscale*ulp.
+	if trunc {
+		// the decimal number was truncated.
+		errors += errorscale / 2
+	}
+
+	f.mant = mantissa
+	f.exp = 0
+	f.neg = neg
+
+	// Multiply by powers of ten.
+	i := (exp10 - firstPowerOfTen) / stepPowerOfTen
+	if exp10 < firstPowerOfTen || i >= len(powersOfTen) {
+		return false
+	}
+	adjExp := (exp10 - firstPowerOfTen) % stepPowerOfTen
+
+	// We multiply by exp%step
+	if adjExp < uint64digits && mantissa < uint64pow10[uint64digits-adjExp] {
+		// We can multiply the mantissa exactly.
+		f.mant *= uint64pow10[adjExp]
+		f.Normalize()
+	} else {
+		f.Normalize()
+		f.Multiply(smallPowersOfTen[adjExp])
+		errors += errorscale / 2
+	}
+
+	// We multiply by 10 to the exp - exp%step.
+	f.Multiply(powersOfTen[i])
+	if errors > 0 {
+		errors += 1
+	}
+	errors += errorscale / 2
+
+	// Normalize
+	shift := f.Normalize()
+	errors <<= shift
+
+	// Now f is a good approximation of the decimal.
+	// Check whether the error is too large: that is, if the mantissa
+	// is perturbated by the error, the resulting float64 will change.
+	// The 64 bits mantissa is 1 + 52 bits for float64 + 11 extra bits.
+	//
+	// In many cases the approximation will be good enough.
+	denormalExp := flt.bias - 63
+	var extrabits uint
+	if f.exp <= denormalExp {
+		// f.mant * 2^f.exp is smaller than 2^(flt.bias+1).
+		extrabits = uint(63 - flt.mantbits + 1 + uint(denormalExp-f.exp))
+	} else {
+		extrabits = uint(63 - flt.mantbits)
+	}
+
+	halfway := uint64(1) << (extrabits - 1)
+	mant_extra := f.mant & (1<<extrabits - 1)
+
+	// Do a signed comparison here! If the error estimate could make
+	// the mantissa round differently for the conversion to double,
+	// then we can't give a definite answer.
+	if int64(halfway)-int64(errors) < int64(mant_extra) &&
+		int64(mant_extra) < int64(halfway)+int64(errors) {
+		return false
+	}
+	return true
+}
+
+// frexp10 is an analogue of math.Frexp for decimal powers. It scales
+// f by an approximate power of ten 10^-exp, and returns exp10, so
+// that f*10^exp10 has the same value as the old f, up to an ulp,
+// as well as the index of 10^-exp in the powers of ten table.
+func (f *extFloat) frexp10() (exp10, index int) {
+	// The constants expMin and expMax constrain the final value of the
+	// binary exponent of f. We want a small integral part in the result
+	// because finding digits of an integer requires divisions, whereas
+	// digits of the fractional part can be found by repeatedly multiplying
+	// by 10.
+	const expMin = -60
+	const expMax = -32
+	// Find power of ten such that x * 10^n has a binary exponent
+	// between expMin and expMax.
+	approxExp10 := ((expMin+expMax)/2 - f.exp) * 28 / 93 // log(10)/log(2) is close to 93/28.
+	i := (approxExp10 - firstPowerOfTen) / stepPowerOfTen
+Loop:
+	for {
+		exp := f.exp + powersOfTen[i].exp + 64
+		switch {
+		case exp < expMin:
+			i++
+		case exp > expMax:
+			i--
+		default:
+			break Loop
+		}
+	}
+	// Apply the desired decimal shift on f. It will have exponent
+	// in the desired range. This is multiplication by 10^-exp10.
+	f.Multiply(powersOfTen[i])
+
+	return -(firstPowerOfTen + i*stepPowerOfTen), i
+}
+
+// frexp10Many applies a common shift by a power of ten to a, b, c.
+func frexp10Many(a, b, c *extFloat) (exp10 int) {
+	exp10, i := c.frexp10()
+	a.Multiply(powersOfTen[i])
+	b.Multiply(powersOfTen[i])
+	return
+}
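frexp10 guesses a starting index into the powers-of-ten table using the integer approximation 28/93 ≈ log10(2) noted in its comment; a one-line sanity check of that constant:

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	// The two values agree to about four decimal places, which is all
	// the loop in frexp10 needs for its initial guess.
	fmt.Printf("%.5f %.5f\n", math.Log10(2), 28.0/93.0) // 0.30103 0.30108
}
```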
+// FixedDecimal stores in d the first n significant digits
+// of the decimal representation of f. It returns false
+// if it cannot be sure of the answer.
+func (f *extFloat) FixedDecimal(d *decimalSlice, n int) bool {
+	if f.mant == 0 {
+		d.nd = 0
+		d.dp = 0
+		d.neg = f.neg
+		return true
+	}
+	if n == 0 {
+		panic("strconv: internal error: extFloat.FixedDecimal called with n == 0")
+	}
+	// Multiply by an appropriate power of ten to have a reasonable
+	// number to process.
+	f.Normalize()
+	exp10, _ := f.frexp10()
+
+	shift := uint(-f.exp)
+	integer := uint32(f.mant >> shift)
+	fraction := f.mant - (uint64(integer) << shift)
+	ε := uint64(1) // ε is the uncertainty we have on the mantissa of f.
+
+	// Write exactly n digits to d.
+	needed := n        // how many digits are left to write.
+	integerDigits := 0 // the number of decimal digits of integer.
+	pow10 := uint64(1) // the power of ten by which f was scaled.
+	for i, pow := 0, uint64(1); i < 20; i++ {
+		if pow > uint64(integer) {
+			integerDigits = i
+			break
+		}
+		pow *= 10
+	}
+	rest := integer
+	if integerDigits > needed {
+		// the integral part is already large, trim the last digits.
+		pow10 = uint64pow10[integerDigits-needed]
+		integer /= uint32(pow10)
+		rest -= integer * uint32(pow10)
+	} else {
+		rest = 0
+	}
+
+	// Write the digits of integer: the digits of rest are omitted.
+	var buf [32]byte
+	pos := len(buf)
+	for v := integer; v > 0; {
+		v1 := v / 10
+		v -= 10 * v1
+		pos--
+		buf[pos] = byte(v + '0')
+		v = v1
+	}
+	for i := pos; i < len(buf); i++ {
+		d.d[i-pos] = buf[i]
+	}
+	nd := len(buf) - pos
+	d.nd = nd
+	d.dp = integerDigits + exp10
+	needed -= nd
+
+	if needed > 0 {
+		if rest != 0 || pow10 != 1 {
+			panic("strconv: internal error, rest != 0 but needed > 0")
+		}
+		// Emit digits for the fractional part. Each time, 10*fraction
+		// fits in a uint64 without overflow.
+		for needed > 0 {
+			fraction *= 10
+			ε *= 10 // the uncertainty scales as we multiply by ten.
+			if 2*ε > 1<<shift {
+				// the error is so large it could modify which digit to write, abort.
+				return false
+			}
+			digit := fraction >> shift
+			d.d[nd] = byte(digit + '0')
+			fraction -= digit << shift
+			nd++
+			needed--
+		}
+		d.nd = nd
+	}
+
+	// We have written a truncation of f (a numerator / 10^d.dp). The remaining part
+	// can be interpreted as a small number (< 1) to be added to the last digit of the
+	// numerator.
+	//
+	// If rest > 0, the amount is:
+	//    (rest<<shift | fraction) / (pow10 << shift)
+	//    fraction being known with a ±ε uncertainty.
+	//    The fact that n > 0 guarantees that pow10 << shift does not overflow a uint64.
+	//
+	// If rest = 0, pow10 == 1 and the amount is
+	//    fraction / (1 << shift)
+	//    fraction being known with a ±ε uncertainty.
+	//
+	// We pass this information to the rounding routine for adjustment.
+
+	ok := adjustLastDigitFixed(d, uint64(rest)<<shift+fraction, pow10<<shift, shift, ε)
+	if !ok {
+		return false
+	}
+	// trim trailing zeros
+	for i := d.nd - 1; i >= 0; i-- {
+		if d.d[i] != '0' {
+			d.nd = i + 1
+			break
+		}
+	}
+	return true
+}
+
+// adjustLastDigitFixed assumes d contains the representation of the integral part
+// of some number, whose fractional part is num / (den << shift). The numerator
+// num is only known up to an uncertainty of size ε, assumed to be less than
+// (den << shift)/2.
+//
+// It will increase the last digit by one to account for correct rounding, typically
+// when the fractional part is greater than 1/2, and will return false if ε is such
+// that no correct answer can be given.
+func adjustLastDigitFixed(d *decimalSlice, num, den uint64, shift uint, ε uint64) bool {
+	if num > den<<shift {
+		panic("strconv: num > den<<shift in adjustLastDigitFixed")
+	}
+	if 2*ε > den<<shift {
+		panic("strconv: ε > (den<<shift)/2")
+	}
+	if 2*(num+ε) < den<<shift {
+		return true
+	}
+	if 2*(num-ε) > den<<shift {
+		i := d.nd - 1
+		for ; i >= 0; i-- {
+			if d.d[i] == '9' {
+				d.nd--
+			} else {
+				break
+			}
+		}
+		if i < 0 {
+			d.d[0] = '1'
+			d.nd = 1
+			d.dp++
+		} else {
+			d.d[i]++
+		}
+		return true
+	}
+	return false
+}
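adjustLastDigitFixed decides rounding by comparing twice the fractional remainder (with its ±ε uncertainty) against the denominator. Stripped of the shift, the two decisive comparisons look like this — shift = 0 is a simplification; the real code compares against den<<shift:

```go
package main

import "fmt"

func main() {
	// Remainder 0.49 with no uncertainty: safely below one half,
	// so the truncated digits stand.
	num, den, eps := uint64(49), uint64(100), uint64(0)
	fmt.Println(2*(num+eps) < den) // true: keep the truncated digits

	// Remainder 0.51: safely above one half, so the last digit bumps up.
	num = 51
	fmt.Println(2*(num-eps) > den) // true: round the last digit up
}
```

When neither comparison is decisive (the remainder sits within ε of one half), the routine returns false and the caller falls back to the slow, exact path.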
+// ShortestDecimal stores in d the shortest decimal representation of f
+// which belongs to the open interval (lower, upper), where f is supposed
+// to lie. It returns false whenever the result is unsure. The implementation
+// uses the Grisu3 algorithm.
+func (f *extFloat) ShortestDecimal(d *decimalSlice, lower, upper *extFloat) bool {
+	if f.mant == 0 {
+		d.nd = 0
+		d.dp = 0
+		d.neg = f.neg
+		return true
+	}
+	if f.exp == 0 && *lower == *f && *lower == *upper {
+		// an exact integer.
+		var buf [24]byte
+		n := len(buf) - 1
+		for v := f.mant; v > 0; {
+			v1 := v / 10
+			v -= 10 * v1
+			buf[n] = byte(v + '0')
+			n--
+			v = v1
+		}
+		nd := len(buf) - n - 1
+		for i := 0; i < nd; i++ {
+			d.d[i] = buf[n+1+i]
+		}
+		d.nd, d.dp = nd, nd
+		for d.nd > 0 && d.d[d.nd-1] == '0' {
+			d.nd--
+		}
+		if d.nd == 0 {
+			d.dp = 0
+		}
+		d.neg = f.neg
+		return true
+	}
+	upper.Normalize()
+	// Uniformize exponents.
+	if f.exp > upper.exp {
+		f.mant <<= uint(f.exp - upper.exp)
+		f.exp = upper.exp
+	}
+	if lower.exp > upper.exp {
+		lower.mant <<= uint(lower.exp - upper.exp)
+		lower.exp = upper.exp
+	}
+
+	exp10 := frexp10Many(lower, f, upper)
+	// Take a safety margin due to rounding in frexp10Many, but we lose precision.
+	upper.mant++
+	lower.mant--
+
+	// The shortest representation of f is either rounded up or down, but
+	// in any case, it is a truncation of upper.
+	shift := uint(-upper.exp)
+	integer := uint32(upper.mant >> shift)
+	fraction := upper.mant - (uint64(integer) << shift)
+
+	// How far we can go down from upper until the result is wrong.
+	allowance := upper.mant - lower.mant
+	// How far we should go to get a very precise result.
+	targetDiff := upper.mant - f.mant
+
+	// Count integral digits: there are at most 10.
+	var integerDigits int
+	for i, pow := 0, uint64(1); i < 20; i++ {
+		if pow > uint64(integer) {
+			integerDigits = i
+			break
+		}
+		pow *= 10
+	}
+	for i := 0; i < integerDigits; i++ {
+		pow := uint64pow10[integerDigits-i-1]
+		digit := integer / uint32(pow)
+		d.d[i] = byte(digit + '0')
+		integer -= digit * uint32(pow)
+		// evaluate whether we should stop.
+		if currentDiff := uint64(integer)<<shift + fraction; currentDiff < allowance {
+			d.nd = i + 1
+			// Round like it's the last digit.
+			if adjustLastDigit(d, currentDiff, targetDiff, allowance, pow<<shift, 2) {
+				return true
+			}
+			// Adjustment failed; abort.
+			d.nd, d.dp = 0, 0
+			return false
+		}
+	}
+	d.nd = integerDigits
+	d.dp = d.nd + exp10
+
+	// Emit digits for the fractional part. Each time, 10*fraction
+	// fits in a uint64 without overflow.
+	multiplier := uint64(1)
+	for {
+		fraction *= 10
+		multiplier *= 10
+		digit := int(fraction >> shift)
+		d.d[d.nd] = byte(digit + '0')
+		d.nd++
+		fraction -= uint64(digit) << shift
+		if fraction < allowance*multiplier {
+			// We are in the admissible range. Note that if allowance is about to
+			// overflow, that is, allowance > 2^64/10, the condition is automatically
+			// true due to the limited range of fraction.
+			return adjustLastDigit(d,
+				fraction, targetDiff*multiplier, allowance*multiplier,
+				1<<shift, multiplier*2)
+		}
+	}
+}
+
+// adjustLastDigit modifies d = x-currentDiff*ε, to get closest to
+// d = x-targetDiff*ε, without becoming smaller than x-maxDiff*ε.
+// It assumes that a decimal digit is worth ulpDecimal*ε, and that
+// all data is known with an error estimate of ulpBinary*ε.
+func adjustLastDigit(d *decimalSlice, currentDiff, targetDiff, maxDiff, ulpDecimal, ulpBinary uint64) bool {
+	if ulpDecimal < 2*ulpBinary {
+		// Approximation is too wide.
+		return false
+	}
+	for currentDiff+ulpDecimal/2+ulpBinary < targetDiff {
+		d.d[d.nd-1]--
+		currentDiff += ulpDecimal
+	}
+	if currentDiff+ulpDecimal <= targetDiff+ulpDecimal/2+ulpBinary {
+		// we have two choices, and don't know what to do.
+		return false
+	}
+	if currentDiff < ulpBinary || currentDiff > maxDiff-ulpBinary {
+		// we went too far
+		return false
+	}
+	if d.nd == 1 && d.d[0] == '0' {
+		// the number has actually reached zero.
+		d.nd = 0
+		d.dp = 0
+	}
+	return true
+}
diff --git a/vendor/github.com/pquerna/ffjson/fflib/v1/internal/ftoa.go b/vendor/github.com/pquerna/ffjson/fflib/v1/internal/ftoa.go
new file mode 100644
index 0000000..253f83b
--- /dev/null
+++ b/vendor/github.com/pquerna/ffjson/fflib/v1/internal/ftoa.go
@@ -0,0 +1,475 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Binary to decimal floating point conversion.
+// Algorithm:
+//   1) store mantissa in multiprecision decimal
+//   2) shift decimal by exponent
+//   3) read digits out & format
+
+package internal
+
+import "math"
+
+// TODO: move elsewhere?
+type floatInfo struct {
+	mantbits uint
+	expbits  uint
+	bias     int
+}
+
+var float32info = floatInfo{23, 8, -127}
+var float64info = floatInfo{52, 11, -1023}
+
+// formatFloat converts the floating-point number f to a string,
+// according to the format fmt and precision prec. It rounds the
+// result assuming that the original was obtained from a floating-point
+// value of bitSize bits (32 for float32, 64 for float64).
+//
+// The format fmt is one of
+// 'b' (-ddddp±ddd, a binary exponent),
+// 'e' (-d.dddde±dd, a decimal exponent),
+// 'E' (-d.ddddE±dd, a decimal exponent),
+// 'f' (-ddd.dddd, no exponent),
+// 'g' ('e' for large exponents, 'f' otherwise), or
+// 'G' ('E' for large exponents, 'f' otherwise).
+//
+// The precision prec controls the number of digits
+// (excluding the exponent) printed by the 'e', 'E', 'f', 'g', and 'G' formats.
+// For 'e', 'E', and 'f' it is the number of digits after the decimal point.
+// For 'g' and 'G' it is the total number of digits.
+// The special precision -1 uses the smallest number of digits
+// necessary such that ParseFloat will return f exactly.
+func formatFloat(f float64, fmt byte, prec, bitSize int) string {
+	return string(genericFtoa(make([]byte, 0, max(prec+4, 24)), f, fmt, prec, bitSize))
+}
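formatFloat's prec = -1 is the "shortest representation" mode that ShortestDecimal implements via Grisu3. The standard library's FormatFloat exposes the same contract, which makes the guarantee easy to demonstrate:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	// Shortest mode: just enough digits to reconstruct the value exactly.
	s := strconv.FormatFloat(1.0/3.0, 'g', -1, 64)
	fmt.Println(s) // 0.3333333333333333
	f, _ := strconv.ParseFloat(s, 64)
	fmt.Println(f == 1.0/3.0) // true: the shortest form still round-trips
}
```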
+// appendFloat appends the string form of the floating-point number f,
+// as generated by formatFloat, to dst and returns the extended buffer.
+func appendFloat(dst []byte, f float64, fmt byte, prec int, bitSize int) []byte {
+	return genericFtoa(dst, f, fmt, prec, bitSize)
+}
+
+func genericFtoa(dst []byte, val float64, fmt byte, prec, bitSize int) []byte {
+	var bits uint64
+	var flt *floatInfo
+	switch bitSize {
+	case 32:
+		bits = uint64(math.Float32bits(float32(val)))
+		flt = &float32info
+	case 64:
+		bits = math.Float64bits(val)
+		flt = &float64info
+	default:
+		panic("strconv: illegal AppendFloat/FormatFloat bitSize")
+	}
+
+	neg := bits>>(flt.expbits+flt.mantbits) != 0
+	exp := int(bits>>flt.mantbits) & (1<<flt.expbits - 1)
+	mant := bits & (uint64(1)<<flt.mantbits - 1)
+
+	switch exp {
+	case 1<<flt.expbits - 1:
+		// Inf, NaN
+		var s string
+		switch {
+		case mant != 0:
+			s = "NaN"
+		case neg:
+			s = "-Inf"
+		default:
+			s = "+Inf"
+		}
+		return append(dst, s...)
+
+	case 0:
+		// denormalized
+		exp++
+
+	default:
+		// add implicit top bit
+		mant |= uint64(1) << flt.mantbits
+	}
+	exp += flt.bias
+
+	// Pick off easy binary format.
+	if fmt == 'b' {
+		return fmtB(dst, neg, mant, exp, flt)
+	}
+
+	if !optimize {
+		return bigFtoa(dst, prec, fmt, neg, mant, exp, flt)
+	}
+
+	var digs decimalSlice
+	ok := false
+	// Negative precision means "only as much as needed to be exact."
+	shortest := prec < 0
+	if shortest {
+		// Use Grisu3 algorithm.
+		f := new(extFloat)
+		lower, upper := f.AssignComputeBounds(mant, exp, neg, flt)
+		var buf [32]byte
+		digs.d = buf[:]
+		ok = f.ShortestDecimal(&digs, &lower, &upper)
+		if !ok {
+			return bigFtoa(dst, prec, fmt, neg, mant, exp, flt)
+		}
+		// Precision for shortest representation mode.
+		switch fmt {
+		case 'e', 'E':
+			prec = digs.nd - 1
+		case 'f':
+			prec = max(digs.nd-digs.dp, 0)
+		case 'g', 'G':
+			prec = digs.nd
+		}
+	} else if fmt != 'f' {
+		// Fixed number of digits.
+		digits := prec
+		switch fmt {
+		case 'e', 'E':
+			digits++
+		case 'g', 'G':
+			if prec == 0 {
+				prec = 1
+			}
+			digits = prec
+		}
+		if digits <= 15 {
+			// try fast algorithm when the number of digits is reasonable.
+			var buf [24]byte
+			digs.d = buf[:]
+			f := extFloat{mant, exp - int(flt.mantbits), neg}
+			ok = f.FixedDecimal(&digs, digits)
+		}
+	}
+	if !ok {
+		return bigFtoa(dst, prec, fmt, neg, mant, exp, flt)
+	}
+	return formatDigits(dst, shortest, neg, digs, prec, fmt)
+}
+
+// bigFtoa uses multiprecision computations to format a float.
+func bigFtoa(dst []byte, prec int, fmt byte, neg bool, mant uint64, exp int, flt *floatInfo) []byte {
+	d := new(decimal)
+	d.Assign(mant)
+	d.Shift(exp - int(flt.mantbits))
+	var digs decimalSlice
+	shortest := prec < 0
+	if shortest {
+		roundShortest(d, mant, exp, flt)
+		digs = decimalSlice{d: d.d[:], nd: d.nd, dp: d.dp}
+		// Precision for shortest representation mode.
+		switch fmt {
+		case 'e', 'E':
+			prec = digs.nd - 1
+		case 'f':
+			prec = max(digs.nd-digs.dp, 0)
+		case 'g', 'G':
+			prec = digs.nd
+		}
+	} else {
+		// Round appropriately.
+		switch fmt {
+		case 'e', 'E':
+			d.Round(prec + 1)
+		case 'f':
+			d.Round(d.dp + prec)
+		case 'g', 'G':
+			if prec == 0 {
+				prec = 1
+			}
+			d.Round(prec)
+		}
+		digs = decimalSlice{d: d.d[:], nd: d.nd, dp: d.dp}
+	}
+	return formatDigits(dst, shortest, neg, digs, prec, fmt)
+}
+
+type decimalSlice struct {
+	d      []byte
+	nd, dp int
+	neg    bool
+}
+
+func formatDigits(dst []byte, shortest bool, neg bool, digs decimalSlice, prec int, fmt byte) []byte {
+	switch fmt {
+	case 'e', 'E':
+		return fmtE(dst, neg, digs, prec, fmt)
+	case 'f':
+		return fmtF(dst, neg, digs, prec)
+	case 'g', 'G':
+		// trailing fractional zeros in 'e' form will be trimmed.
+		eprec := prec
+		if eprec > digs.nd && digs.nd >= digs.dp {
+			eprec = digs.nd
+		}
+		// %e is used if the exponent from the conversion
+		// is less than -4 or greater than or equal to the precision.
+		// if precision was the shortest possible, use precision 6 for this decision.
+		if shortest {
+			eprec = 6
+		}
+		exp := digs.dp - 1
+		if exp < -4 || exp >= eprec {
+			if prec > digs.nd {
+				prec = digs.nd
+			}
+			return fmtE(dst, neg, digs, prec-1, fmt+'e'-'g')
+		}
+		if prec > digs.dp {
+			prec = digs.nd
+		}
+		return fmtF(dst, neg, digs, max(prec-digs.dp, 0))
+	}
+
+	// unknown format
+	return append(dst, '%', fmt)
+}
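formatDigits' 'g' case picks between the 'e' and 'f' forms from the decimal exponent: less than -4, or at least the effective precision, selects 'e'. The stdlib equivalent shows the switch point:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	// exponent -5 < -4: the 'e' form wins.
	fmt.Println(strconv.FormatFloat(0.00001234, 'g', -1, 64)) // 1.234e-05
	// exponent 3, within range: the 'f' form wins.
	fmt.Println(strconv.FormatFloat(1234.5, 'g', -1, 64)) // 1234.5
}
```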
+// Round d (= mant * 2^exp) to the shortest number of digits
+// that will let the original floating point value be precisely
+// reconstructed. Size is original floating point size (64 or 32).
+func roundShortest(d *decimal, mant uint64, exp int, flt *floatInfo) {
+	// If mantissa is zero, the number is zero; stop now.
+	if mant == 0 {
+		d.nd = 0
+		return
+	}
+
+	// Compute upper and lower such that any decimal number
+	// between upper and lower (possibly inclusive)
+	// will round to the original floating point number.
+
+	// We may see at once that the number is already shortest.
+	//
+	// Suppose d is not denormal, so that 2^exp <= d < 10^dp.
+	// The closest shorter number is at least 10^(dp-nd) away.
+	// The lower/upper bounds computed below are at distance
+	// at most 2^(exp-mantbits).
+	//
+	// So the number is already shortest if 10^(dp-nd) > 2^(exp-mantbits),
+	// or equivalently log2(10)*(dp-nd) > exp-mantbits.
+	// It is true if 332/100*(dp-nd) >= exp-mantbits (log2(10) > 3.32).
+	minexp := flt.bias + 1 // minimum possible exponent
+	if exp > minexp && 332*(d.dp-d.nd) >= 100*(exp-int(flt.mantbits)) {
+		// The number is already shortest.
+		return
+	}
+
+	// d = mant << (exp - mantbits)
+	// Next highest floating point number is mant+1 << exp-mantbits.
+	// Our upper bound is halfway between, mant*2+1 << exp-mantbits-1.
+	upper := new(decimal)
+	upper.Assign(mant*2 + 1)
+	upper.Shift(exp - int(flt.mantbits) - 1)
+
+	// d = mant << (exp - mantbits)
+	// Next lowest floating point number is mant-1 << exp-mantbits,
+	// unless mant-1 drops the significant bit and exp is not the minimum exp,
+	// in which case the next lowest is mant*2-1 << exp-mantbits-1.
+	// Either way, call it mantlo << explo-mantbits.
+	// Our lower bound is halfway between, mantlo*2+1 << explo-mantbits-1.
+	var mantlo uint64
+	var explo int
+	if mant > 1<<flt.mantbits || exp == minexp {
+		mantlo = mant - 1
+		explo = exp
+	} else {
+		mantlo = mant*2 - 1
+		explo = exp - 1
+	}
+	lower := new(decimal)
+	lower.Assign(mantlo*2 + 1)
+	lower.Shift(explo - int(flt.mantbits) - 1)
+
+	// The upper and lower bounds are possible outputs only if
+	// the original mantissa is even, so that IEEE round-to-even
+	// would round to the original mantissa and not the neighbors.
+	inclusive := mant%2 == 0
+
+	// Now we can figure out the minimum number of digits required.
+	// Walk along until d has distinguished itself from upper and lower.
+	for i := 0; i < d.nd; i++ {
+		var l, m, u byte // lower, d, upper digits
+		if i < lower.nd {
+			l = lower.d[i]
+		}
+		m = d.d[i]
+		if i < upper.nd {
+			u = upper.d[i]
+		}
+
+		// Okay to round down (truncate) if lower has a different digit
+		// or if lower is inclusive and is exactly the result of rounding down.
+		okdown := l != m || (inclusive && l == m && i+1 == lower.nd)
+
+		// Okay to round up if upper has a different digit and
+		// either upper is inclusive, allowing round to exactly upper,
+		// or upper is bigger than the result of rounding up.
+		okup := m != u && (inclusive || m+1 < u || i+1 < upper.nd)
+
+		// If it's okay to do either, then round to the nearest one.
+		// If it's okay to do only one, do it.
+		switch {
+		case okdown && okup:
+			d.Round(i + 1)
+			return
+		case okdown:
+			d.RoundDown(i + 1)
+			return
+		case okup:
+			d.RoundUp(i + 1)
+			return
+		}
+	}
+}
+
+// %e: -d.ddddde±dd
+func fmtE(dst []byte, neg bool, d decimalSlice, prec int, fmt byte) []byte {
+	// sign
+	if neg {
+		dst = append(dst, '-')
+	}
+
+	// first digit
+	ch := byte('0')
+	if d.nd != 0 {
+		ch = d.d[0]
+	}
+	dst = append(dst, ch)
+
+	// .moredigits
+	if prec > 0 {
+		dst = append(dst, '.')
+		i := 1
+		m := d.nd + prec + 1 - max(d.nd, prec+1)
+		for i < m {
+			dst = append(dst, d.d[i])
+			i++
+		}
+		for i <= prec {
+			dst = append(dst, '0')
+			i++
+		}
+	}
+
+	// e±
+	dst = append(dst, fmt)
+	exp := d.dp - 1
+	if d.nd == 0 { // special case: 0 has exponent 0
+		exp = 0
+	}
+	if exp < 0 {
+		ch = '-'
+		exp = -exp
+	} else {
+		ch = '+'
+	}
+	dst = append(dst, ch)
+
+	// dddd
+	var buf [3]byte
+	i := len(buf)
+	for exp >= 10 {
+		i--
+		buf[i] = byte(exp%10 + '0')
+		exp /= 10
+	}
+	// exp < 10
+	i--
+	buf[i] = byte(exp + '0')
+
+	switch i {
+	case 0:
+		dst = append(dst, buf[0], buf[1], buf[2])
+	case 1:
+		dst = append(dst, buf[1], buf[2])
+	case 2:
+		// leading zeroes
+		dst = append(dst, '0', buf[2])
+	}
+	return dst
+}
+
+// %f: -ddddddd.ddddd
+func fmtF(dst []byte, neg bool, d decimalSlice, prec int) []byte {
+	// sign
+	if neg {
+		dst = append(dst, '-')
+	}
+
+	// integer, padded with zeros as needed.
+	if d.dp > 0 {
+		var i int
+		for i = 0; i < d.dp && i < d.nd; i++ {
+			dst = append(dst, d.d[i])
+		}
+		for ; i < d.dp; i++ {
+			dst = append(dst, '0')
+		}
+	} else {
+		dst = append(dst, '0')
+	}
+
+	// fraction
+	if prec > 0 {
+		dst = append(dst, '.')
+		for i := 0; i < prec; i++ {
+			ch := byte('0')
+			if j := d.dp + i; 0 <= j && j < d.nd {
+				ch = d.d[j]
+			}
+			dst = append(dst, ch)
+		}
+	}
+
+	return dst
+}
+
+// %b: -ddddddddp+ddd
+func fmtB(dst []byte, neg bool, mant uint64, exp int, flt *floatInfo) []byte {
+	var buf [50]byte
+	w := len(buf)
+	exp -= int(flt.mantbits)
+	esign := byte('+')
+	if exp < 0 {
+		esign = '-'
+		exp = -exp
+	}
+	n := 0
+	for exp > 0 || n < 1 {
+		n++
+		w--
+		buf[w] = byte(exp%10 + '0')
+		exp /= 10
+	}
+	w--
+	buf[w] = esign
+	w--
+	buf[w] = 'p'
+	n = 0
+	for mant > 0 || n < 1 {
+		n++
+		w--
+		buf[w] = byte(mant%10 + '0')
+		mant /= 10
+	}
+	if neg {
+		w--
+		buf[w] = '-'
+	}
+	return append(dst, buf[w:]...)
+}
+
+func max(a, b int) int {
+	if a > b {
+		return a
+	}
+	return b
+}
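The 'b' format produced by fmtB is the raw mantissa-and-binary-exponent view of a float; again via the stdlib equivalent:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	// 6.25 = 7036874417766400 * 2^-50: decimal mantissa, 'p', binary exponent.
	fmt.Println(strconv.FormatFloat(6.25, 'b', -1, 64)) // 7036874417766400p-50
}
```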
diff --git a/vendor/github.com/pquerna/ffjson/fflib/v1/iota.go b/vendor/github.com/pquerna/ffjson/fflib/v1/iota.go
new file mode 100644
index 0000000..3e50f0c
--- /dev/null
+++ b/vendor/github.com/pquerna/ffjson/fflib/v1/iota.go
@@ -0,0 +1,161 @@
+/**
+ * Copyright 2014 Paul Querna
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+/* Portions of this file are based on Go stdlib's strconv/iota.go */
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package v1
+
+import (
+	"io"
+)
+
+const (
+	digits   = "0123456789abcdefghijklmnopqrstuvwxyz"
+	digits01 = "0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789"
+	digits10 = "0000000000111111111122222222223333333333444444444455555555556666666666777777777788888888889999999999"
+)
+
+var shifts = [len(digits) + 1]uint{
+	1 << 1: 1,
+	1 << 2: 2,
+	1 << 3: 3,
+	1 << 4: 4,
+	1 << 5: 5,
+}
+
+var smallNumbers = [][]byte{
+	[]byte("0"),
+	[]byte("1"),
+	[]byte("2"),
+	[]byte("3"),
+	[]byte("4"),
+	[]byte("5"),
+	[]byte("6"),
+	[]byte("7"),
+	[]byte("8"),
+	[]byte("9"),
+	[]byte("10"),
+}
+
+type FormatBitsWriter interface {
+	io.Writer
+	io.ByteWriter
+}
+
+type FormatBitsScratch struct{}
+
+//
+// DEPRECATED: `scratch` is no longer used; FormatBits2 is available.
+//
+// FormatBits writes the string representation of u in the given base
+// to dst. If neg is set, u is treated as a negative int64 value.
+//
+func FormatBits(scratch *FormatBitsScratch, dst FormatBitsWriter, u uint64, base int, neg bool) {
+	FormatBits2(dst, u, base, neg)
+}
+
+// FormatBits2 writes the string representation of u in the given base
+// to dst. If neg is set, u is treated as a negative int64 value.
+func FormatBits2(dst FormatBitsWriter, u uint64, base int, neg bool) {
+	if base < 2 || base > len(digits) {
+		panic("strconv: illegal AppendInt/FormatInt base")
+	}
+	// fast path for small common numbers
+	if u <= 10 {
+		if neg {
+			dst.WriteByte('-')
+		}
+		dst.Write(smallNumbers[u])
+		return
+	}
+
+	// 2 <= base && base <= len(digits)
+
+	var a = makeSlice(65)
+	// var a [64 + 1]byte // +1 for sign of 64bit value in base 2
+	i := len(a)
+
+	if neg {
+		u = -u
+	}
+
+	// convert bits
+	if base == 10 {
+		// common case: use constants for / and % because
+		// the compiler can optimize it into a multiply+shift,
+		// and unroll loop
+		for u >= 100 {
+			i -= 2
+			q := u / 100
+			j := uintptr(u - q*100)
+			a[i+1] = digits01[j]
+			a[i+0] = digits10[j]
+			u = q
+		}
+		if u >= 10 {
+			i--
+			q := u / 10
+			a[i] = digits[uintptr(u-q*10)]
+			u = q
+		}
+
+	} else if s := shifts[base]; s > 0 {
+		// base is power of 2: use shifts and masks instead of / and %
+		b := uint64(base)
+		m := uintptr(b) - 1 // == 1<<s - 1
+		for u >= b {
+			i--
+			a[i] = digits[uintptr(u)&m]
+			u >>= s
+		}
+
+	} else {
+		// general case
+		b := uint64(base)
+		for u >= b {
+			i--
+			a[i] = digits[uintptr(u%b)]
+			u /= b
+		}
+	}
+
+	// u < base
+	i--
+	a[i] = digits[uintptr(u)]
+
+	// add sign, if any
+	if neg {
+		i--
+		a[i] = '-'
+	}
+
+	dst.Write(a[i:])
+
+	Pool(a)
+
+	return
+}
diff --git a/vendor/github.com/pquerna/ffjson/fflib/v1/jsonstring.go b/vendor/github.com/pquerna/ffjson/fflib/v1/jsonstring.go
new file mode 100644
index 0000000..513b45d
--- /dev/null
+++ b/vendor/github.com/pquerna/ffjson/fflib/v1/jsonstring.go
@@ -0,0 +1,512 @@
+/**
+ * Copyright 2014 Paul Querna
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+/* Portions of this file are based on Go stdlib's encoding/json/encode.go */
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package v1
+
+import (
+	"io"
+	"strconv"
+	"unicode"
+	"unicode/utf16"
+	"unicode/utf8"
+)
+
+const hex = "0123456789abcdef"
+
+type JsonStringWriter interface {
+	io.Writer
+	io.ByteWriter
+	stringWriter
+}
+
+func WriteJsonString(buf JsonStringWriter, s string) {
+	WriteJson(buf, []byte(s))
+}
+
+/**
+ * Function ported from encoding/json: func (e *encodeState) string(s string) (int, error)
+ */
+func WriteJson(buf JsonStringWriter, s []byte) {
+	buf.WriteByte('"')
+	start := 0
+	for i := 0; i < len(s); {
+		if b := s[i]; b < utf8.RuneSelf {
+			/*
+				if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' {
+					i++
+					continue
+				}
+			*/
+			if lt[b] {
+				i++
+				continue
+			}
+
+			if start < i {
+				buf.Write(s[start:i])
+			}
+			switch b {
+			case '\\', '"':
+				buf.WriteByte('\\')
+				buf.WriteByte(b)
+			case '\n':
+				buf.WriteByte('\\')
+				buf.WriteByte('n')
+			case '\r':
+				buf.WriteByte('\\')
+				buf.WriteByte('r')
+			default:
+				// This encodes bytes < 0x20 except for \n and \r,
+				// as well as < and >. The latter are escaped because they
+				// can lead to security holes when user-controlled strings
+				// are rendered into JSON and served to some browsers.
+				buf.WriteString(`\u00`)
+				buf.WriteByte(hex[b>>4])
+				buf.WriteByte(hex[b&0xF])
+			}
+			i++
+			start = i
+			continue
+		}
+		c, size := utf8.DecodeRune(s[i:])
+		if c == utf8.RuneError && size == 1 {
+			if start < i {
+				buf.Write(s[start:i])
+			}
+			buf.WriteString(`\ufffd`)
+			i += size
+			start = i
+			continue
+		}
+		// U+2028 is LINE SEPARATOR.
+		// U+2029 is PARAGRAPH SEPARATOR.
+		// They are both technically valid characters in JSON strings,
+		// but don't work in JSONP, which has to be evaluated as JavaScript,
+		// and can lead to security holes there. It is valid JSON to
+		// escape them, so we do so unconditionally.
+		// See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion.
+		if c == '\u2028' || c == '\u2029' {
+			if start < i {
+				buf.Write(s[start:i])
+			}
+			buf.WriteString(`\u202`)
+			buf.WriteByte(hex[c&0xF])
+			i += size
+			start = i
+			continue
+		}
+		i += size
+	}
+	if start < len(s) {
+		buf.Write(s[start:])
+	}
+	buf.WriteByte('"')
+}
+
+// UnquoteBytes will decode []byte containing json string to go string
+// ported from encoding/json/decode.go
+func UnquoteBytes(s []byte) (t []byte, ok bool) {
+	if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' {
+		return
+	}
+	s = s[1 : len(s)-1]
+
+	// Check for unusual characters. If there are none,
+	// then no unquoting is needed, so return a slice of the
+	// original bytes.
+	r := 0
+	for r < len(s) {
+		c := s[r]
+		if c == '\\' || c == '"' || c < ' ' {
+			break
+		}
+		if c < utf8.RuneSelf {
+			r++
+			continue
+		}
+		rr, size := utf8.DecodeRune(s[r:])
+		if rr == utf8.RuneError && size == 1 {
+			break
+		}
+		r += size
+	}
+	if r == len(s) {
+		return s, true
+	}
+
+	b := make([]byte, len(s)+2*utf8.UTFMax)
+	w := copy(b, s[0:r])
+	for r < len(s) {
+		// Out of room? Can only happen if s is full of
+		// malformed UTF-8 and we're replacing each
+		// byte with RuneError.
+		if w >= len(b)-2*utf8.UTFMax {
+			nb := make([]byte, (len(b)+utf8.UTFMax)*2)
+			copy(nb, b[0:w])
+			b = nb
+		}
+		switch c := s[r]; {
+		case c == '\\':
+			r++
+			if r >= len(s) {
+				return
+			}
+			switch s[r] {
+			default:
+				return
+			case '"', '\\', '/', '\'':
+				b[w] = s[r]
+				r++
+				w++
+			case 'b':
+				b[w] = '\b'
+				r++
+				w++
+			case 'f':
+				b[w] = '\f'
+				r++
+				w++
+			case 'n':
+				b[w] = '\n'
+				r++
+				w++
+			case 'r':
+				b[w] = '\r'
+				r++
+				w++
+			case 't':
+				b[w] = '\t'
+				r++
+				w++
+			case 'u':
+				r--
+				rr := getu4(s[r:])
+				if rr < 0 {
+					return
+				}
+				r += 6
+				if utf16.IsSurrogate(rr) {
+					rr1 := getu4(s[r:])
+					if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar {
+						// A valid pair; consume.
+						r += 6
+						w += utf8.EncodeRune(b[w:], dec)
+						break
+					}
+					// Invalid surrogate; fall back to replacement rune.
+					rr = unicode.ReplacementChar
+				}
+				w += utf8.EncodeRune(b[w:], rr)
+			}
+
+		// Quote, control characters are invalid.
+		case c == '"', c < ' ':
+			return
+
+		// ASCII
+		case c < utf8.RuneSelf:
+			b[w] = c
+			r++
+			w++
+
+		// Coerce to well-formed UTF-8.
+		default:
+			rr, size := utf8.DecodeRune(s[r:])
+			r += size
+			w += utf8.EncodeRune(b[w:], rr)
+		}
+	}
+	return b[0:w], true
+}
+
+// getu4 decodes \uXXXX from the beginning of s, returning the hex value,
+// or it returns -1.
+func getu4(s []byte) rune {
+	if len(s) < 6 || s[0] != '\\' || s[1] != 'u' {
+		return -1
+	}
+	r, err := strconv.ParseUint(string(s[2:6]), 16, 64)
+	if err != nil {
+		return -1
+	}
+	return rune(r)
+}
+
+// TODO(pquerna): consider combining with the normal byte mask.
+var lt [256]bool = [256]bool{ + false, /* 0 */ + false, /* 1 */ + false, /* 2 */ + false, /* 3 */ + false, /* 4 */ + false, /* 5 */ + false, /* 6 */ + false, /* 7 */ + false, /* 8 */ + false, /* 9 */ + false, /* 10 */ + false, /* 11 */ + false, /* 12 */ + false, /* 13 */ + false, /* 14 */ + false, /* 15 */ + false, /* 16 */ + false, /* 17 */ + false, /* 18 */ + false, /* 19 */ + false, /* 20 */ + false, /* 21 */ + false, /* 22 */ + false, /* 23 */ + false, /* 24 */ + false, /* 25 */ + false, /* 26 */ + false, /* 27 */ + false, /* 28 */ + false, /* 29 */ + false, /* 30 */ + false, /* 31 */ + true, /* 32 */ + true, /* 33 */ + false, /* 34 */ + true, /* 35 */ + true, /* 36 */ + true, /* 37 */ + false, /* 38 */ + true, /* 39 */ + true, /* 40 */ + true, /* 41 */ + true, /* 42 */ + true, /* 43 */ + true, /* 44 */ + true, /* 45 */ + true, /* 46 */ + true, /* 47 */ + true, /* 48 */ + true, /* 49 */ + true, /* 50 */ + true, /* 51 */ + true, /* 52 */ + true, /* 53 */ + true, /* 54 */ + true, /* 55 */ + true, /* 56 */ + true, /* 57 */ + true, /* 58 */ + true, /* 59 */ + false, /* 60 */ + true, /* 61 */ + false, /* 62 */ + true, /* 63 */ + true, /* 64 */ + true, /* 65 */ + true, /* 66 */ + true, /* 67 */ + true, /* 68 */ + true, /* 69 */ + true, /* 70 */ + true, /* 71 */ + true, /* 72 */ + true, /* 73 */ + true, /* 74 */ + true, /* 75 */ + true, /* 76 */ + true, /* 77 */ + true, /* 78 */ + true, /* 79 */ + true, /* 80 */ + true, /* 81 */ + true, /* 82 */ + true, /* 83 */ + true, /* 84 */ + true, /* 85 */ + true, /* 86 */ + true, /* 87 */ + true, /* 88 */ + true, /* 89 */ + true, /* 90 */ + true, /* 91 */ + false, /* 92 */ + true, /* 93 */ + true, /* 94 */ + true, /* 95 */ + true, /* 96 */ + true, /* 97 */ + true, /* 98 */ + true, /* 99 */ + true, /* 100 */ + true, /* 101 */ + true, /* 102 */ + true, /* 103 */ + true, /* 104 */ + true, /* 105 */ + true, /* 106 */ + true, /* 107 */ + true, /* 108 */ + true, /* 109 */ + true, /* 110 */ + true, /* 111 */ + true, /* 112 */ + true, /* 113 */ + true, /* 114 */ + true, /* 115 */ + true, /* 116 */ + true, /* 117 */ + true, /* 118 */ + true, /* 119 */ + true, /* 120 */ + true, /* 121 */ + true, /* 122 */ + true, /* 123 */ + true, /* 124 */ + true, /* 125 */ + true, /* 126 */ + true, /* 127 */ + true, /* 128 */ + true, /* 129 */ + true, /* 130 */ + true, /* 131 */ + true, /* 132 */ + true, /* 133 */ + true, /* 134 */ + true, /* 135 */ + true, /* 136 */ + true, /* 137 */ + true, /* 138 */ + true, /* 139 */ + true, /* 140 */ + true, /* 141 */ + true, /* 142 */ + true, /* 143 */ + true, /* 144 */ + true, /* 145 */ + true, /* 146 */ + true, /* 147 */ + true, /* 148 */ + true, /* 149 */ + true, /* 150 */ + true, /* 151 */ + true, /* 152 */ + true, /* 153 */ + true, /* 154 */ + true, /* 155 */ + true, /* 156 */ + true, /* 157 */ + true, /* 158 */ + true, /* 159 */ + true, /* 160 */ + true, /* 161 */ + true, /* 162 */ + true, /* 163 */ + true, /* 164 */ + true, /* 165 */ + true, /* 166 */ + true, /* 167 */ + true, /* 168 */ + true, /* 169 */ + true, /* 170 */ + true, /* 171 */ + true, /* 172 */ + true, /* 173 */ + true, /* 174 */ + true, /* 175 */ + true, /* 176 */ + true, /* 177 */ + true, /* 178 */ + true, /* 179 */ + true, /* 180 */ + true, /* 181 */ + true, /* 182 */ + true, /* 183 */ + true, /* 184 */ + true, /* 185 */ + true, /* 186 */ + true, /* 187 */ + true, /* 188 */ + true, /* 189 */ + true, /* 190 */ + true, /* 191 */ + true, /* 192 */ + true, /* 193 */ + true, /* 194 */ + true, /* 195 */ + true, /* 196 */ + true, /* 197 */ + true, /* 198 */ + true, /* 199 
*/
+	true, /* 200 */
+	true, /* 201 */
+	true, /* 202 */
+	true, /* 203 */
+	true, /* 204 */
+	true, /* 205 */
+	true, /* 206 */
+	true, /* 207 */
+	true, /* 208 */
+	true, /* 209 */
+	true, /* 210 */
+	true, /* 211 */
+	true, /* 212 */
+	true, /* 213 */
+	true, /* 214 */
+	true, /* 215 */
+	true, /* 216 */
+	true, /* 217 */
+	true, /* 218 */
+	true, /* 219 */
+	true, /* 220 */
+	true, /* 221 */
+	true, /* 222 */
+	true, /* 223 */
+	true, /* 224 */
+	true, /* 225 */
+	true, /* 226 */
+	true, /* 227 */
+	true, /* 228 */
+	true, /* 229 */
+	true, /* 230 */
+	true, /* 231 */
+	true, /* 232 */
+	true, /* 233 */
+	true, /* 234 */
+	true, /* 235 */
+	true, /* 236 */
+	true, /* 237 */
+	true, /* 238 */
+	true, /* 239 */
+	true, /* 240 */
+	true, /* 241 */
+	true, /* 242 */
+	true, /* 243 */
+	true, /* 244 */
+	true, /* 245 */
+	true, /* 246 */
+	true, /* 247 */
+	true, /* 248 */
+	true, /* 249 */
+	true, /* 250 */
+	true, /* 251 */
+	true, /* 252 */
+	true, /* 253 */
+	true, /* 254 */
+	true, /* 255 */
+}
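Taken together, lt and WriteJson give HTML-safe JSON strings: '<', '>' and '&' (entries 60, 62 and 38 above, marked false) are emitted as \u00XX escapes. A small usage sketch, assuming the vendored import path:

```go
package main

import (
	"bytes"
	"fmt"

	v1 "github.com/pquerna/ffjson/fflib/v1"
)

func main() {
	// bytes.Buffer satisfies JsonStringWriter (Write, WriteByte, WriteString).
	var buf bytes.Buffer
	v1.WriteJsonString(&buf, "a<b&c")
	fmt.Println(buf.String()) // "a\u003cb\u0026c"
}
```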
diff --git a/vendor/github.com/pquerna/ffjson/fflib/v1/lexer.go b/vendor/github.com/pquerna/ffjson/fflib/v1/lexer.go
new file mode 100644
index 0000000..8ffd54b
--- /dev/null
+++ b/vendor/github.com/pquerna/ffjson/fflib/v1/lexer.go
@@ -0,0 +1,937 @@
+/**
+ * Copyright 2014 Paul Querna
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+/* Portions of this file are derived from yajl: */
+/*
+ * Copyright (c) 2007-2014, Lloyd Hilaiel
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package v1
+
+import (
+	"errors"
+	"fmt"
+	"io"
+)
+
+type FFParseState int
+
+const (
+	FFParse_map_start FFParseState = iota
+	FFParse_want_key
+	FFParse_want_colon
+	FFParse_want_value
+	FFParse_after_value
+)
+
+type FFTok int
+
+const (
+	FFTok_init          FFTok = iota
+	FFTok_bool          FFTok = iota
+	FFTok_colon         FFTok = iota
+	FFTok_comma         FFTok = iota
+	FFTok_eof           FFTok = iota
+	FFTok_error         FFTok = iota
+	FFTok_left_brace    FFTok = iota
+	FFTok_left_bracket  FFTok = iota
+	FFTok_null          FFTok = iota
+	FFTok_right_brace   FFTok = iota
+	FFTok_right_bracket FFTok = iota
+
+	/* we differentiate between integers and doubles to allow the
+	 * parser to interpret the number without re-scanning */
+	FFTok_integer FFTok = iota
+	FFTok_double  FFTok = iota
+
+	FFTok_string FFTok = iota
+
+	/* comment tokens are not currently returned to the parser, ever */
+	FFTok_comment FFTok = iota
+)
+
+type FFErr int
+
+const (
+	FFErr_e_ok                           FFErr = iota
+	FFErr_io                             FFErr = iota
+	FFErr_string_invalid_utf8            FFErr = iota
+	FFErr_string_invalid_escaped_char    FFErr = iota
+	FFErr_string_invalid_json_char       FFErr = iota
+	FFErr_string_invalid_hex_char        FFErr = iota
+	FFErr_invalid_char                   FFErr = iota
+	FFErr_invalid_string                 FFErr = iota
+	FFErr_missing_integer_after_decimal  FFErr = iota
+	FFErr_missing_integer_after_exponent FFErr = iota
+	FFErr_missing_integer_after_minus    FFErr = iota
+	FFErr_unallowed_comment              FFErr = iota
+	FFErr_incomplete_comment             FFErr = iota
+	FFErr_unexpected_token_type          FFErr = iota // TODO: improve this error
+)
+
+type FFLexer struct {
+	reader   *ffReader
+	Output   DecodingBuffer
+	Token    FFTok
+	Error    FFErr
+	BigError error
+	// TODO: convert all of this to an interface
+	lastCurrentChar int
+	captureAll      bool
+	buf             Buffer
+}
+
+func NewFFLexer(input []byte) *FFLexer {
+	fl := &FFLexer{
+		Token:  FFTok_init,
+		Error:  FFErr_e_ok,
+		reader: newffReader(input),
+		Output: &Buffer{},
+	}
+	// TODO: guess size?
+	//fl.Output.Grow(64)
+	return fl
+}
+
+type LexerError struct {
+	offset int
+	line   int
+	char   int
+	err    error
+}
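A minimal sketch of driving the lexer by hand with the exported API defined above (the vendored import path is an assumption here):

```go
package main

import (
	"fmt"

	v1 "github.com/pquerna/ffjson/fflib/v1"
)

func main() {
	// Scan emits one token at a time; Output holds the lexeme just scanned.
	lexer := v1.NewFFLexer([]byte(`{"key": [1, 2.5, true]}`))
	for {
		tok := lexer.Scan()
		if tok == v1.FFTok_eof || tok == v1.FFTok_error {
			break
		}
		fmt.Println(tok, string(lexer.Output.Bytes()))
	}
}
```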
+// Reset the Lexer and add new input.
+func (ffl *FFLexer) Reset(input []byte) {
+	ffl.Token = FFTok_init
+	ffl.Error = FFErr_e_ok
+	ffl.BigError = nil
+	ffl.reader.Reset(input)
+	ffl.lastCurrentChar = 0
+	ffl.Output.Reset()
+}
+
+func (le *LexerError) Error() string {
+	return fmt.Sprintf(`ffjson error: (%T)%s offset=%d line=%d char=%d`,
+		le.err, le.err.Error(),
+		le.offset, le.line, le.char)
+}
+
+func (ffl *FFLexer) WrapErr(err error) error {
+	line, char := ffl.reader.PosWithLine()
+	// TODO: calculate lines/characters based on offset
+	return &LexerError{
+		offset: ffl.reader.Pos(),
+		line:   line,
+		char:   char,
+		err:    err,
+	}
+}
+
+func (ffl *FFLexer) scanReadByte() (byte, error) {
+	var c byte
+	var err error
+	if ffl.captureAll {
+		c, err = ffl.reader.ReadByte()
+	} else {
+		c, err = ffl.reader.ReadByteNoWS()
+	}
+
+	if err != nil {
+		ffl.Error = FFErr_io
+		ffl.BigError = err
+		return 0, err
+	}
+
+	return c, nil
+}
+
+func (ffl *FFLexer) readByte() (byte, error) {
+	c, err := ffl.reader.ReadByte()
+	if err != nil {
+		ffl.Error = FFErr_io
+		ffl.BigError = err
+		return 0, err
+	}
+
+	return c, nil
+}
+
+func (ffl *FFLexer) unreadByte() {
+	ffl.reader.UnreadByte()
+}
+
+func (ffl *FFLexer) wantBytes(want []byte, iftrue FFTok) FFTok {
+	startPos := ffl.reader.Pos()
+	for _, b := range want {
+		c, err := ffl.readByte()
+
+		if err != nil {
+			return FFTok_error
+		}
+
+		if c != b {
+			ffl.unreadByte()
+			// fmt.Printf("wanted bytes: %s\n", string(want))
+			// TODO(pquerna): this is a bad error message
+			ffl.Error = FFErr_invalid_string
+			return FFTok_error
+		}
+	}
+
+	endPos := ffl.reader.Pos()
+	ffl.Output.Write(ffl.reader.Slice(startPos, endPos))
+	return iftrue
+}
+
+func (ffl *FFLexer) lexComment() FFTok {
+	c, err := ffl.readByte()
+	if err != nil {
+		return FFTok_error
+	}
+
+	if c == '/' {
+		// a // comment, scan until line ends.
+		for {
+			c, err := ffl.readByte()
+			if err != nil {
+				return FFTok_error
+			}
+
+			if c == '\n' {
+				return FFTok_comment
+			}
+		}
+	} else if c == '*' {
+		// a /* */ comment, scan */
+		for {
+			c, err := ffl.readByte()
+			if err != nil {
+				return FFTok_error
+			}
+
+			if c == '*' {
+				c, err := ffl.readByte()
+
+				if err != nil {
+					return FFTok_error
+				}
+
+				if c == '/' {
+					return FFTok_comment
+				}
+
+				ffl.Error = FFErr_incomplete_comment
+				return FFTok_error
+			}
+		}
+	} else {
+		ffl.Error = FFErr_incomplete_comment
+		return FFTok_error
+	}
+}
+
+func (ffl *FFLexer) lexString() FFTok {
+	if ffl.captureAll {
+		ffl.buf.Reset()
+		err := ffl.reader.SliceString(&ffl.buf)
+
+		if err != nil {
+			ffl.BigError = err
+			return FFTok_error
+		}
+
+		WriteJson(ffl.Output, ffl.buf.Bytes())
+
+		return FFTok_string
+	} else {
+		err := ffl.reader.SliceString(ffl.Output)
+
+		if err != nil {
+			ffl.BigError = err
+			return FFTok_error
+		}
+
+		return FFTok_string
+	}
+}
+
+func (ffl *FFLexer) lexNumber() FFTok {
+	var numRead int = 0
+	tok := FFTok_integer
+	startPos := ffl.reader.Pos()
+
+	c, err := ffl.readByte()
+	if err != nil {
+		return FFTok_error
+	}
+
+	/* optional leading minus */
+	if c == '-' {
+		c, err = ffl.readByte()
+		if err != nil {
+			return FFTok_error
+		}
+	}
+
+	/* a single zero, or a series of integers */
+	if c == '0' {
+		c, err = ffl.readByte()
+		if err != nil {
+			return FFTok_error
+		}
+	} else if c >= '1' && c <= '9' {
+		for c >= '0' && c <= '9' {
+			c, err = ffl.readByte()
+			if err != nil {
+				return FFTok_error
+			}
+		}
+	} else {
+		ffl.unreadByte()
+		ffl.Error = FFErr_missing_integer_after_minus
+		return FFTok_error
+	}
+
+	if c == '.'
{ + numRead = 0 + c, err = ffl.readByte() + if err != nil { + return FFTok_error + } + + for c >= '0' && c <= '9' { + numRead++ + c, err = ffl.readByte() + if err != nil { + return FFTok_error + } + } + + if numRead == 0 { + ffl.unreadByte() + + ffl.Error = FFErr_missing_integer_after_decimal + return FFTok_error + } + + tok = FFTok_double + } + + /* optional exponent (indicates this is floating point) */ + if c == 'e' || c == 'E' { + numRead = 0 + c, err = ffl.readByte() + if err != nil { + return FFTok_error + } + + /* optional sign */ + if c == '+' || c == '-' { + c, err = ffl.readByte() + if err != nil { + return FFTok_error + } + } + + for c >= '0' && c <= '9' { + numRead++ + c, err = ffl.readByte() + if err != nil { + return FFTok_error + } + } + + if numRead == 0 { + ffl.Error = FFErr_missing_integer_after_exponent + return FFTok_error + } + + tok = FFTok_double + } + + ffl.unreadByte() + + endPos := ffl.reader.Pos() + ffl.Output.Write(ffl.reader.Slice(startPos, endPos)) + return tok +} + +var true_bytes = []byte{'r', 'u', 'e'} +var false_bytes = []byte{'a', 'l', 's', 'e'} +var null_bytes = []byte{'u', 'l', 'l'} + +func (ffl *FFLexer) Scan() FFTok { + tok := FFTok_error + if ffl.captureAll == false { + ffl.Output.Reset() + } + ffl.Token = FFTok_init + + for { + c, err := ffl.scanReadByte() + if err != nil { + if err == io.EOF { + return FFTok_eof + } else { + return FFTok_error + } + } + + switch c { + case '{': + tok = FFTok_left_bracket + if ffl.captureAll { + ffl.Output.WriteByte('{') + } + goto lexed + case '}': + tok = FFTok_right_bracket + if ffl.captureAll { + ffl.Output.WriteByte('}') + } + goto lexed + case '[': + tok = FFTok_left_brace + if ffl.captureAll { + ffl.Output.WriteByte('[') + } + goto lexed + case ']': + tok = FFTok_right_brace + if ffl.captureAll { + ffl.Output.WriteByte(']') + } + goto lexed + case ',': + tok = FFTok_comma + if ffl.captureAll { + ffl.Output.WriteByte(',') + } + goto lexed + case ':': + tok = FFTok_colon + if ffl.captureAll { + ffl.Output.WriteByte(':') + } + goto lexed + case '\t', '\n', '\v', '\f', '\r', ' ': + if ffl.captureAll { + ffl.Output.WriteByte(c) + } + break + case 't': + ffl.Output.WriteByte('t') + tok = ffl.wantBytes(true_bytes, FFTok_bool) + goto lexed + case 'f': + ffl.Output.WriteByte('f') + tok = ffl.wantBytes(false_bytes, FFTok_bool) + goto lexed + case 'n': + ffl.Output.WriteByte('n') + tok = ffl.wantBytes(null_bytes, FFTok_null) + goto lexed + case '"': + tok = ffl.lexString() + goto lexed + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + ffl.unreadByte() + tok = ffl.lexNumber() + goto lexed + case '/': + tok = ffl.lexComment() + goto lexed + default: + tok = FFTok_error + ffl.Error = FFErr_invalid_char + } + } + +lexed: + ffl.Token = tok + return tok +} + +func (ffl *FFLexer) scanField(start FFTok, capture bool) ([]byte, error) { + switch start { + case FFTok_left_brace, + FFTok_left_bracket: + { + end := FFTok_right_brace + if start == FFTok_left_bracket { + end = FFTok_right_bracket + if capture { + ffl.Output.WriteByte('{') + } + } else { + if capture { + ffl.Output.WriteByte('[') + } + } + + depth := 1 + if capture { + ffl.captureAll = true + } + // TODO: work. 
+ scanloop: + for { + tok := ffl.Scan() + //fmt.Printf("capture-token: %v end: %v depth: %v\n", tok, end, depth) + switch tok { + case FFTok_eof: + return nil, errors.New("ffjson: unexpected EOF") + case FFTok_error: + if ffl.BigError != nil { + return nil, ffl.BigError + } + return nil, ffl.Error.ToError() + case end: + depth-- + if depth == 0 { + break scanloop + } + case start: + depth++ + } + } + + if capture { + ffl.captureAll = false + } + + if capture { + return ffl.Output.Bytes(), nil + } else { + return nil, nil + } + } + case FFTok_bool, + FFTok_integer, + FFTok_null, + FFTok_double: + // simple value, return it. + if capture { + return ffl.Output.Bytes(), nil + } else { + return nil, nil + } + + case FFTok_string: + //TODO(pquerna): so, other users expect this to be a quoted string :( + if capture { + ffl.buf.Reset() + WriteJson(&ffl.buf, ffl.Output.Bytes()) + return ffl.buf.Bytes(), nil + } else { + return nil, nil + } + } + + return nil, fmt.Errorf("ffjson: invalid capture type: %v", start) +} + +// Captures an entire field value, including recursive objects, +// and converts them to a []byte suitable to pass to a sub-object's +// UnmarshalJSON +func (ffl *FFLexer) CaptureField(start FFTok) ([]byte, error) { + return ffl.scanField(start, true) +} + +func (ffl *FFLexer) SkipField(start FFTok) error { + _, err := ffl.scanField(start, false) + return err +} + +// TODO(pquerna): return line number and offset. +func (err FFErr) ToError() error { + switch err { + case FFErr_e_ok: + return nil + case FFErr_io: + return errors.New("ffjson: IO error") + case FFErr_string_invalid_utf8: + return errors.New("ffjson: string with invalid UTF-8 sequence") + case FFErr_string_invalid_escaped_char: + return errors.New("ffjson: string with invalid escaped character") + case FFErr_string_invalid_json_char: + return errors.New("ffjson: string with invalid JSON character") + case FFErr_string_invalid_hex_char: + return errors.New("ffjson: string with invalid hex character") + case FFErr_invalid_char: + return errors.New("ffjson: invalid character") + case FFErr_invalid_string: + return errors.New("ffjson: invalid string") + case FFErr_missing_integer_after_decimal: + return errors.New("ffjson: missing integer after decimal") + case FFErr_missing_integer_after_exponent: + return errors.New("ffjson: missing integer after exponent") + case FFErr_missing_integer_after_minus: + return errors.New("ffjson: missing integer after minus") + case FFErr_unallowed_comment: + return errors.New("ffjson: unallowed comment") + case FFErr_incomplete_comment: + return errors.New("ffjson: incomplete comment") + case FFErr_unexpected_token_type: + return errors.New("ffjson: unexpected token sequence") + } + + panic(fmt.Sprintf("unknown error type: %v ", err)) +} + +func (state FFParseState) String() string { + switch state { + case FFParse_map_start: + return "map:start" + case FFParse_want_key: + return "want_key" + case FFParse_want_colon: + return "want_colon" + case FFParse_want_value: + return "want_value" + case FFParse_after_value: + return "after_value" + } + + panic(fmt.Sprintf("unknown parse state: %d", int(state))) +} + +func (tok FFTok) String() string { + switch tok { + case FFTok_init: + return "tok:init" + case FFTok_bool: + return "tok:bool" + case FFTok_colon: + return "tok:colon" + case FFTok_comma: + return "tok:comma" + case FFTok_eof: + return "tok:eof" + case FFTok_error: + return "tok:error" + case FFTok_left_brace: + return "tok:left_brace" + case FFTok_left_bracket: + return "tok:left_bracket" 
+	case FFTok_null:
+		return "tok:null"
+	case FFTok_right_brace:
+		return "tok:right_brace"
+	case FFTok_right_bracket:
+		return "tok:right_bracket"
+	case FFTok_integer:
+		return "tok:integer"
+	case FFTok_double:
+		return "tok:double"
+	case FFTok_string:
+		return "tok:string"
+	case FFTok_comment:
+		return "comment"
+	}
+
+	panic(fmt.Sprintf("unknown token: %d", int(tok)))
+}
+
+/* a lookup table which lets us quickly determine five things:
+ * cVEC - valid escaped control char
+ * note. the solidus '/' may be escaped or not.
+ * cIJC - invalid json char
+ * cVHC - valid hex char
+ * cNFP - needs further processing (from a string scanning perspective)
+ * cNUC - needs utf8 checking when enabled (from a string scanning perspective)
+ */
+
+const (
+	cVEC int8 = 0x01
+	cIJC int8 = 0x02
+	cVHC int8 = 0x04
+	cNFP int8 = 0x08
+	cNUC int8 = 0x10
+)
+
+var byteLookupTable [256]int8 = [256]int8{
+	cIJC, /* 0 */
+	cIJC, /* 1 */
+	cIJC, /* 2 */
+	cIJC, /* 3 */
+	cIJC, /* 4 */
+	cIJC, /* 5 */
+	cIJC, /* 6 */
+	cIJC, /* 7 */
+	cIJC, /* 8 */
+	cIJC, /* 9 */
+	cIJC, /* 10 */
+	cIJC, /* 11 */
+	cIJC, /* 12 */
+	cIJC, /* 13 */
+	cIJC, /* 14 */
+	cIJC, /* 15 */
+	cIJC, /* 16 */
+	cIJC, /* 17 */
+	cIJC, /* 18 */
+	cIJC, /* 19 */
+	cIJC, /* 20 */
+	cIJC, /* 21 */
+	cIJC, /* 22 */
+	cIJC, /* 23 */
+	cIJC, /* 24 */
+	cIJC, /* 25 */
+	cIJC, /* 26 */
+	cIJC, /* 27 */
+	cIJC, /* 28 */
+	cIJC, /* 29 */
+	cIJC, /* 30 */
+	cIJC, /* 31 */
+	0, /* 32 */
+	0, /* 33 */
+	cVEC | cIJC | cNFP, /* 34 */
+	0, /* 35 */
+	0, /* 36 */
+	0, /* 37 */
+	0, /* 38 */
+	0, /* 39 */
+	0, /* 40 */
+	0, /* 41 */
+	0, /* 42 */
+	0, /* 43 */
+	0, /* 44 */
+	0, /* 45 */
+	0, /* 46 */
+	cVEC, /* 47 */
+	cVHC, /* 48 */
+	cVHC, /* 49 */
+	cVHC, /* 50 */
+	cVHC, /* 51 */
+	cVHC, /* 52 */
+	cVHC, /* 53 */
+	cVHC, /* 54 */
+	cVHC, /* 55 */
+	cVHC, /* 56 */
+	cVHC, /* 57 */
+	0, /* 58 */
+	0, /* 59 */
+	0, /* 60 */
+	0, /* 61 */
+	0, /* 62 */
+	0, /* 63 */
+	0, /* 64 */
+	cVHC, /* 65 */
+	cVHC, /* 66 */
+	cVHC, /* 67 */
+	cVHC, /* 68 */
+	cVHC, /* 69 */
+	cVHC, /* 70 */
+	0, /* 71 */
+	0, /* 72 */
+	0, /* 73 */
+	0, /* 74 */
+	0, /* 75 */
+	0, /* 76 */
+	0, /* 77 */
+	0, /* 78 */
+	0, /* 79 */
+	0, /* 80 */
+	0, /* 81 */
+	0, /* 82 */
+	0, /* 83 */
+	0, /* 84 */
+	0, /* 85 */
+	0, /* 86 */
+	0, /* 87 */
+	0, /* 88 */
+	0, /* 89 */
+	0, /* 90 */
+	0, /* 91 */
+	cVEC | cIJC | cNFP, /* 92 */
+	0, /* 93 */
+	0, /* 94 */
+	0, /* 95 */
+	0, /* 96 */
+	cVHC, /* 97 */
+	cVEC | cVHC, /* 98 */
+	cVHC, /* 99 */
+	cVHC, /* 100 */
+	cVHC, /* 101 */
+	cVEC | cVHC, /* 102 */
+	0, /* 103 */
+	0, /* 104 */
+	0, /* 105 */
+	0, /* 106 */
+	0, /* 107 */
+	0, /* 108 */
+	0, /* 109 */
+	cVEC, /* 110 */
+	0, /* 111 */
+	0, /* 112 */
+	0, /* 113 */
+	cVEC, /* 114 */
+	0, /* 115 */
+	cVEC, /* 116 */
+	0, /* 117 */
+	0, /* 118 */
+	0, /* 119 */
+	0, /* 120 */
+	0, /* 121 */
+	0, /* 122 */
+	0, /* 123 */
+	0, /* 124 */
+	0, /* 125 */
+	0, /* 126 */
+	0, /* 127 */
+	cNUC, /* 128 */
+	cNUC, /* 129 */
+	cNUC, /* 130 */
+	cNUC, /* 131 */
+	cNUC, /* 132 */
+	cNUC, /* 133 */
+	cNUC, /* 134 */
+	cNUC, /* 135 */
+	cNUC, /* 136 */
+	cNUC, /* 137 */
+	cNUC, /* 138 */
+	cNUC, /* 139 */
+	cNUC, /* 140 */
+	cNUC, /* 141 */
+	cNUC, /* 142 */
+	cNUC, /* 143 */
+	cNUC, /* 144 */
+	cNUC, /* 145 */
+	cNUC, /* 146 */
+	cNUC, /* 147 */
+	cNUC, /* 148 */
+	cNUC, /* 149 */
+	cNUC, /* 150 */
+	cNUC, /* 151 */
+	cNUC, /* 152 */
+	cNUC, /* 153 */
+	cNUC, /* 154 */
+	cNUC, /* 155 */
+	cNUC, /* 156 */
+	cNUC, /* 157 */
+	cNUC, /* 158 */
+	cNUC, /* 159 */
+	cNUC, /* 160 */
+	cNUC, /* 161 */
+	cNUC, /* 162 */
+	cNUC, /* 163 */
+	cNUC, /* 164 */
+	cNUC, /* 165 */
+	cNUC, /* 166 */
+	cNUC, /* 167 */
+	cNUC, /* 168 */
+	cNUC, /* 169 */
+	cNUC, /* 170 */
+	cNUC, /* 171 */
+	cNUC, /* 172 */
+	cNUC, /* 173 */
+	cNUC, /* 174 */
+	cNUC, /* 175 */
+	cNUC, /* 176 */
+	cNUC, /* 177 */
+	cNUC, /* 178 */
+	cNUC, /* 179 */
+	cNUC, /* 180 */
+	cNUC, /* 181 */
+	cNUC, /* 182 */
+	cNUC, /* 183 */
+	cNUC, /* 184 */
+	cNUC, /* 185 */
+	cNUC, /* 186 */
+	cNUC, /* 187 */
+	cNUC, /* 188 */
+	cNUC, /* 189 */
+	cNUC, /* 190 */
+	cNUC, /* 191 */
+	cNUC, /* 192 */
+	cNUC, /* 193 */
+	cNUC, /* 194 */
+	cNUC, /* 195 */
+	cNUC, /* 196 */
+	cNUC, /* 197 */
+	cNUC, /* 198 */
+	cNUC, /* 199 */
+	cNUC, /* 200 */
+	cNUC, /* 201 */
+	cNUC, /* 202 */
+	cNUC, /* 203 */
+	cNUC, /* 204 */
+	cNUC, /* 205 */
+	cNUC, /* 206 */
+	cNUC, /* 207 */
+	cNUC, /* 208 */
+	cNUC, /* 209 */
+	cNUC, /* 210 */
+	cNUC, /* 211 */
+	cNUC, /* 212 */
+	cNUC, /* 213 */
+	cNUC, /* 214 */
+	cNUC, /* 215 */
+	cNUC, /* 216 */
+	cNUC, /* 217 */
+	cNUC, /* 218 */
+	cNUC, /* 219 */
+	cNUC, /* 220 */
+	cNUC, /* 221 */
+	cNUC, /* 222 */
+	cNUC, /* 223 */
+	cNUC, /* 224 */
+	cNUC, /* 225 */
+	cNUC, /* 226 */
+	cNUC, /* 227 */
+	cNUC, /* 228 */
+	cNUC, /* 229 */
+	cNUC, /* 230 */
+	cNUC, /* 231 */
+	cNUC, /* 232 */
+	cNUC, /* 233 */
+	cNUC, /* 234 */
+	cNUC, /* 235 */
+	cNUC, /* 236 */
+	cNUC, /* 237 */
+	cNUC, /* 238 */
+	cNUC, /* 239 */
+	cNUC, /* 240 */
+	cNUC, /* 241 */
+	cNUC, /* 242 */
+	cNUC, /* 243 */
+	cNUC, /* 244 */
+	cNUC, /* 245 */
+	cNUC, /* 246 */
+	cNUC, /* 247 */
+	cNUC, /* 248 */
+	cNUC, /* 249 */
+	cNUC, /* 250 */
+	cNUC, /* 251 */
+	cNUC, /* 252 */
+	cNUC, /* 253 */
+	cNUC, /* 254 */
+	cNUC, /* 255 */
+}
diff --git a/vendor/github.com/pquerna/ffjson/fflib/v1/reader.go b/vendor/github.com/pquerna/ffjson/fflib/v1/reader.go
new file mode 100644
index 0000000..0f22c46
--- /dev/null
+++ b/vendor/github.com/pquerna/ffjson/fflib/v1/reader.go
@@ -0,0 +1,512 @@
+/**
+ * Copyright 2014 Paul Querna
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package v1
+
+import (
+	"fmt"
+	"io"
+	"unicode"
+	"unicode/utf16"
+)
+
+const sliceStringMask = cIJC | cNFP
+
+type ffReader struct {
+	s []byte
+	i int
+	l int
+}
+
+func newffReader(d []byte) *ffReader {
+	return &ffReader{
+		s: d,
+		i: 0,
+		l: len(d),
+	}
+}
+
+func (r *ffReader) Slice(start, stop int) []byte {
+	return r.s[start:stop]
+}
+
+func (r *ffReader) Pos() int {
+	return r.i
+}
+
+// Reset the reader, and add new input.
+func (r *ffReader) Reset(d []byte) {
+	r.s = d
+	r.i = 0
+	r.l = len(d)
+}
+
+// Calculates the position as a line number and a character offset within that
+// line. Because this isn't tracked for performance reasons, it iterates the
+// buffer from the beginning and should only be used in error paths.
+func (r *ffReader) PosWithLine() (int, int) {
+	currentLine := 1
+	currentChar := 0
+
+	for i := 0; i < r.i; i++ {
+		c := r.s[i]
+		currentChar++
+		if c == '\n' {
+			currentLine++
+			currentChar = 0
+		}
+	}
+
+	return currentLine, currentChar
+}
+
+func (r *ffReader) ReadByteNoWS() (byte, error) {
+	if r.i >= r.l {
+		return 0, io.EOF
+	}
+
+	j := r.i
+
+	for {
+		c := r.s[j]
+		j++
+
+		// inline whitespace parsing gives another ~8% performance boost
+		// for many kinds of nicely indented JSON.
+		// ... and using a [256]bool instead of multiple ifs, gives another 2%
+		/*
+			if c != '\t' &&
+				c != '\n' &&
+				c != '\v' &&
+				c != '\f' &&
+				c != '\r' &&
+				c != ' ' {
+				r.i = j
+				return c, nil
+			}
+		*/
+		if whitespaceLookupTable[c] == false {
+			r.i = j
+			return c, nil
+		}
+
+		if j >= r.l {
+			return 0, io.EOF
+		}
+	}
+}
+
+func (r *ffReader) ReadByte() (byte, error) {
+	if r.i >= r.l {
+		return 0, io.EOF
+	}
+
+	r.i++
+
+	return r.s[r.i-1], nil
+}
+
+func (r *ffReader) UnreadByte() error {
+	if r.i <= 0 {
+		panic("ffReader.UnreadByte: at beginning of slice")
+	}
+	r.i--
+	return nil
+}
+
+func (r *ffReader) readU4(j int) (rune, error) {
+
+	var u4 [4]byte
+	for i := 0; i < 4; i++ {
+		if j >= r.l {
+			return -1, io.EOF
+		}
+		c := r.s[j]
+		if byteLookupTable[c]&cVHC != 0 {
+			u4[i] = c
+			j++
+			continue
+		} else {
+			// TODO(pquerna): handle errors better. layering violation.
+			return -1, fmt.Errorf("lex_string_invalid_hex_char: %v %v", c, string(u4[:]))
+		}
+	}
+
+	// TODO(pquerna): utf16.IsSurrogate
+	rr, err := ParseUint(u4[:], 16, 64)
+	if err != nil {
+		return -1, err
+	}
+	return rune(rr), nil
+}
+
+func (r *ffReader) handleEscaped(c byte, j int, out DecodingBuffer) (int, error) {
+	if j >= r.l {
+		return 0, io.EOF
+	}
+
+	c = r.s[j]
+	j++
+
+	if c == 'u' {
+		ru, err := r.readU4(j)
+		if err != nil {
+			return 0, err
+		}
+
+		if utf16.IsSurrogate(ru) {
+			ru2, err := r.readU4(j + 6)
+			if err != nil {
+				return 0, err
+			}
+			out.Write(r.s[r.i : j-2])
+			r.i = j + 10
+			j = r.i
+			rval := utf16.DecodeRune(ru, ru2)
+			if rval != unicode.ReplacementChar {
+				out.WriteRune(rval)
+			} else {
+				return 0, fmt.Errorf("lex_string_invalid_unicode_surrogate: %v %v", ru, ru2)
+			}
+		} else {
+			out.Write(r.s[r.i : j-2])
+			r.i = j + 4
+			j = r.i
+			out.WriteRune(ru)
+		}
+		return j, nil
+	} else if byteLookupTable[c]&cVEC == 0 {
+		return 0, fmt.Errorf("lex_string_invalid_escaped_char: %v", c)
+	} else {
+		out.Write(r.s[r.i : j-2])
+		r.i = j
+		j = r.i
+
+		switch c {
+		case '"':
+			out.WriteByte('"')
+		case '\\':
+			out.WriteByte('\\')
+		case '/':
+			out.WriteByte('/')
+		case 'b':
+			out.WriteByte('\b')
+		case 'f':
+			out.WriteByte('\f')
+		case 'n':
+			out.WriteByte('\n')
+		case 'r':
+			out.WriteByte('\r')
+		case 't':
+			out.WriteByte('\t')
+		}
+	}
+
+	return j, nil
+}
+
+func (r *ffReader) SliceString(out DecodingBuffer) error {
+	var c byte
+	// TODO(pquerna): string_with_escapes? de-escape here?
+	j := r.i
+
+	for {
+		if j >= r.l {
+			return io.EOF
+		}
+
+		j, c = scanString(r.s, j)
+
+		if c == '"' {
+			if j != r.i {
+				out.Write(r.s[r.i : j-1])
+				r.i = j
+			}
+			return nil
+		} else if c == '\\' {
+			var err error
+			j, err = r.handleEscaped(c, j, out)
+			if err != nil {
+				return err
+			}
+		} else if byteLookupTable[c]&cIJC != 0 {
+			return fmt.Errorf("lex_string_invalid_json_char: %v", c)
+		}
+		continue
+	}
+}
+
+// TODO(pquerna): consider combining with the normal byte mask.
+var whitespaceLookupTable [256]bool = [256]bool{ + false, /* 0 */ + false, /* 1 */ + false, /* 2 */ + false, /* 3 */ + false, /* 4 */ + false, /* 5 */ + false, /* 6 */ + false, /* 7 */ + false, /* 8 */ + true, /* 9 */ + true, /* 10 */ + true, /* 11 */ + true, /* 12 */ + true, /* 13 */ + false, /* 14 */ + false, /* 15 */ + false, /* 16 */ + false, /* 17 */ + false, /* 18 */ + false, /* 19 */ + false, /* 20 */ + false, /* 21 */ + false, /* 22 */ + false, /* 23 */ + false, /* 24 */ + false, /* 25 */ + false, /* 26 */ + false, /* 27 */ + false, /* 28 */ + false, /* 29 */ + false, /* 30 */ + false, /* 31 */ + true, /* 32 */ + false, /* 33 */ + false, /* 34 */ + false, /* 35 */ + false, /* 36 */ + false, /* 37 */ + false, /* 38 */ + false, /* 39 */ + false, /* 40 */ + false, /* 41 */ + false, /* 42 */ + false, /* 43 */ + false, /* 44 */ + false, /* 45 */ + false, /* 46 */ + false, /* 47 */ + false, /* 48 */ + false, /* 49 */ + false, /* 50 */ + false, /* 51 */ + false, /* 52 */ + false, /* 53 */ + false, /* 54 */ + false, /* 55 */ + false, /* 56 */ + false, /* 57 */ + false, /* 58 */ + false, /* 59 */ + false, /* 60 */ + false, /* 61 */ + false, /* 62 */ + false, /* 63 */ + false, /* 64 */ + false, /* 65 */ + false, /* 66 */ + false, /* 67 */ + false, /* 68 */ + false, /* 69 */ + false, /* 70 */ + false, /* 71 */ + false, /* 72 */ + false, /* 73 */ + false, /* 74 */ + false, /* 75 */ + false, /* 76 */ + false, /* 77 */ + false, /* 78 */ + false, /* 79 */ + false, /* 80 */ + false, /* 81 */ + false, /* 82 */ + false, /* 83 */ + false, /* 84 */ + false, /* 85 */ + false, /* 86 */ + false, /* 87 */ + false, /* 88 */ + false, /* 89 */ + false, /* 90 */ + false, /* 91 */ + false, /* 92 */ + false, /* 93 */ + false, /* 94 */ + false, /* 95 */ + false, /* 96 */ + false, /* 97 */ + false, /* 98 */ + false, /* 99 */ + false, /* 100 */ + false, /* 101 */ + false, /* 102 */ + false, /* 103 */ + false, /* 104 */ + false, /* 105 */ + false, /* 106 */ + false, /* 107 */ + false, /* 108 */ + false, /* 109 */ + false, /* 110 */ + false, /* 111 */ + false, /* 112 */ + false, /* 113 */ + false, /* 114 */ + false, /* 115 */ + false, /* 116 */ + false, /* 117 */ + false, /* 118 */ + false, /* 119 */ + false, /* 120 */ + false, /* 121 */ + false, /* 122 */ + false, /* 123 */ + false, /* 124 */ + false, /* 125 */ + false, /* 126 */ + false, /* 127 */ + false, /* 128 */ + false, /* 129 */ + false, /* 130 */ + false, /* 131 */ + false, /* 132 */ + false, /* 133 */ + false, /* 134 */ + false, /* 135 */ + false, /* 136 */ + false, /* 137 */ + false, /* 138 */ + false, /* 139 */ + false, /* 140 */ + false, /* 141 */ + false, /* 142 */ + false, /* 143 */ + false, /* 144 */ + false, /* 145 */ + false, /* 146 */ + false, /* 147 */ + false, /* 148 */ + false, /* 149 */ + false, /* 150 */ + false, /* 151 */ + false, /* 152 */ + false, /* 153 */ + false, /* 154 */ + false, /* 155 */ + false, /* 156 */ + false, /* 157 */ + false, /* 158 */ + false, /* 159 */ + false, /* 160 */ + false, /* 161 */ + false, /* 162 */ + false, /* 163 */ + false, /* 164 */ + false, /* 165 */ + false, /* 166 */ + false, /* 167 */ + false, /* 168 */ + false, /* 169 */ + false, /* 170 */ + false, /* 171 */ + false, /* 172 */ + false, /* 173 */ + false, /* 174 */ + false, /* 175 */ + false, /* 176 */ + false, /* 177 */ + false, /* 178 */ + false, /* 179 */ + false, /* 180 */ + false, /* 181 */ + false, /* 182 */ + false, /* 183 */ + false, /* 184 */ + false, /* 185 */ + false, /* 186 */ + false, /* 187 */ + false, /* 188 */ + false, /* 189 */ + false, 
/* 190 */ + false, /* 191 */ + false, /* 192 */ + false, /* 193 */ + false, /* 194 */ + false, /* 195 */ + false, /* 196 */ + false, /* 197 */ + false, /* 198 */ + false, /* 199 */ + false, /* 200 */ + false, /* 201 */ + false, /* 202 */ + false, /* 203 */ + false, /* 204 */ + false, /* 205 */ + false, /* 206 */ + false, /* 207 */ + false, /* 208 */ + false, /* 209 */ + false, /* 210 */ + false, /* 211 */ + false, /* 212 */ + false, /* 213 */ + false, /* 214 */ + false, /* 215 */ + false, /* 216 */ + false, /* 217 */ + false, /* 218 */ + false, /* 219 */ + false, /* 220 */ + false, /* 221 */ + false, /* 222 */ + false, /* 223 */ + false, /* 224 */ + false, /* 225 */ + false, /* 226 */ + false, /* 227 */ + false, /* 228 */ + false, /* 229 */ + false, /* 230 */ + false, /* 231 */ + false, /* 232 */ + false, /* 233 */ + false, /* 234 */ + false, /* 235 */ + false, /* 236 */ + false, /* 237 */ + false, /* 238 */ + false, /* 239 */ + false, /* 240 */ + false, /* 241 */ + false, /* 242 */ + false, /* 243 */ + false, /* 244 */ + false, /* 245 */ + false, /* 246 */ + false, /* 247 */ + false, /* 248 */ + false, /* 249 */ + false, /* 250 */ + false, /* 251 */ + false, /* 252 */ + false, /* 253 */ + false, /* 254 */ + false, /* 255 */ +} diff --git a/vendor/github.com/pquerna/ffjson/fflib/v1/reader_scan_generic.go b/vendor/github.com/pquerna/ffjson/fflib/v1/reader_scan_generic.go new file mode 100644 index 0000000..47c2607 --- /dev/null +++ b/vendor/github.com/pquerna/ffjson/fflib/v1/reader_scan_generic.go @@ -0,0 +1,34 @@ +/** + * Copyright 2014 Paul Querna + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package v1 + +func scanString(s []byte, j int) (int, byte) { + for { + if j >= len(s) { + return j, 0 + } + + c := s[j] + j++ + if byteLookupTable[c]&sliceStringMask == 0 { + continue + } + + return j, c + } +} diff --git a/vendor/github.com/tchap/go-patricia/LICENSE b/vendor/github.com/tchap/go-patricia/LICENSE new file mode 100644 index 0000000..e50d398 --- /dev/null +++ b/vendor/github.com/tchap/go-patricia/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2014 The AUTHORS + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/tchap/go-patricia/README.md b/vendor/github.com/tchap/go-patricia/README.md new file mode 100644 index 0000000..d9cbf62 --- /dev/null +++ b/vendor/github.com/tchap/go-patricia/README.md @@ -0,0 +1,123 @@ +# go-patricia # + +**Documentation**: [GoDoc](http://godoc.org/github.com/tchap/go-patricia/patricia)
+**Build Status**: [![Build +Status](https://drone.io/github.com/tchap/go-patricia/status.png)](https://drone.io/github.com/tchap/go-patricia/latest)
+**Test Coverage**: [![Coverage
+Status](https://coveralls.io/repos/tchap/go-patricia/badge.png)](https://coveralls.io/r/tchap/go-patricia)
+
+## About ##
+
+A generic patricia trie (also called radix tree) implemented in Go (Golang).
+
+The patricia trie as implemented in this library enables fast visiting of items
+in some particular ways:
+
+1. visit all items saved in the tree,
+2. visit all items matching a particular prefix (visit subtree), or
+3. given a string, visit all items matching some prefix of that string.
+
+The `[]byte` type is used for keys, `interface{}` for values.
+
+`Trie` is not thread-safe. Synchronize the access yourself.
+
+### State of the Project ###
+
+Apparently some people are using this, so the API should not change often.
+Any ideas on how to make the library better are still welcome.
+
+More (unit) testing would be cool as well...
+
+## Usage ##
+
+Import the package from GitHub first.
+
+```go
+import "github.com/tchap/go-patricia/patricia"
+```
+
+You can also use the gopkg.in import path:
+
+```go
+import "gopkg.in/tchap/go-patricia.v2/patricia"
+```
+
+Then you can start having fun.
+
+```go
+printItem := func(prefix patricia.Prefix, item patricia.Item) error {
+	fmt.Printf("%q: %v\n", prefix, item)
+	return nil
+}
+
+// Create a new default trie (using the default parameter values).
+trie := NewTrie()
+
+// Create a new custom trie.
+trie := NewTrie(MaxPrefixPerNode(16), MaxChildrenPerSparseNode(10))
+
+// Insert some items.
+trie.Insert(Prefix("Pepa Novak"), 1)
+trie.Insert(Prefix("Pepa Sindelar"), 2)
+trie.Insert(Prefix("Karel Macha"), 3)
+trie.Insert(Prefix("Karel Hynek Macha"), 4)
+
+// Just check if some things are present in the tree.
+key := Prefix("Pepa Novak")
+fmt.Printf("%q present? %v\n", key, trie.Match(key))
+// "Pepa Novak" present? true
+key = Prefix("Karel")
+fmt.Printf("Anybody called %q here? %v\n", key, trie.MatchSubtree(key))
+// Anybody called "Karel" here? true
+
+// Walk the tree in alphabetical order.
+trie.Visit(printItem)
+// "Karel Hynek Macha": 4
+// "Karel Macha": 3
+// "Pepa Novak": 1
+// "Pepa Sindelar": 2
+
+// Walk a subtree.
+trie.VisitSubtree(Prefix("Pepa"), printItem)
+// "Pepa Novak": 1
+// "Pepa Sindelar": 2
+
+// Modify an item, then fetch it from the tree.
+trie.Set(Prefix("Karel Hynek Macha"), 10)
+key = Prefix("Karel Hynek Macha")
+fmt.Printf("%q: %v\n", key, trie.Get(key))
+// "Karel Hynek Macha": 10
+
+// Walk prefixes.
+prefix := Prefix("Karel Hynek Macha je kouzelnik")
+trie.VisitPrefixes(prefix, printItem)
+// "Karel Hynek Macha": 10
+
+// Delete some items.
+trie.Delete(Prefix("Pepa Novak"))
+trie.Delete(Prefix("Karel Macha"))
+
+// Walk again.
+trie.Visit(printItem)
+// "Karel Hynek Macha": 10
+// "Pepa Sindelar": 2
+
+// Delete a subtree.
+trie.DeleteSubtree(Prefix("Pepa"))
+
+// Print what is left.
+trie.Visit(printItem)
+// "Karel Hynek Macha": 10
+```
+
+## License ##
+
+MIT, check the `LICENSE` file.
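+
+### Pruning a walk ###
+
+Visitor callbacks can also prune the walk: returning the package's
+`SkipSubtree` error from a visitor makes `Visit` and `VisitSubtree` skip
+everything below the node that was just visited. Note that visitors only run
+on nodes holding a non-nil item, so the pruning point depends on where items
+actually sit in the tree. A minimal sketch, assuming the same import path as
+above:
+
+```go
+package main
+
+import (
+	"bytes"
+	"fmt"
+
+	"github.com/tchap/go-patricia/patricia"
+)
+
+func main() {
+	trie := patricia.NewTrie()
+	trie.Insert(patricia.Prefix("Pepa Novak"), 1)
+	trie.Insert(patricia.Prefix("Pepa Novak Jr"), 2)
+	trie.Insert(patricia.Prefix("Karel Macha"), 3)
+
+	// Skip descending below any visited item whose key starts with "Pepa".
+	trie.Visit(func(prefix patricia.Prefix, item patricia.Item) error {
+		if bytes.HasPrefix(prefix, []byte("Pepa")) {
+			return patricia.SkipSubtree
+		}
+		fmt.Printf("%q: %v\n", prefix, item)
+		return nil
+	})
+	// "Karel Macha": 3
+}
+```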
+ +[![Gittip +Badge](http://img.shields.io/gittip/alanhamlett.png)](https://www.gittip.com/tchap/ +"Gittip Badge") + +[![Bitdeli +Badge](https://d2weczhvl823v0.cloudfront.net/tchap/go-patricia/trend.png)](https://bitdeli.com/free +"Bitdeli Badge") diff --git a/vendor/github.com/tchap/go-patricia/patricia/children.go b/vendor/github.com/tchap/go-patricia/patricia/children.go new file mode 100644 index 0000000..a5677c3 --- /dev/null +++ b/vendor/github.com/tchap/go-patricia/patricia/children.go @@ -0,0 +1,325 @@ +// Copyright (c) 2014 The go-patricia AUTHORS +// +// Use of this source code is governed by The MIT License +// that can be found in the LICENSE file. + +package patricia + +import ( + "fmt" + "io" + "sort" +) + +type childList interface { + length() int + head() *Trie + add(child *Trie) childList + remove(b byte) + replace(b byte, child *Trie) + next(b byte) *Trie + walk(prefix *Prefix, visitor VisitorFunc) error + print(w io.Writer, indent int) + total() int +} + +type tries []*Trie + +func (t tries) Len() int { + return len(t) +} + +func (t tries) Less(i, j int) bool { + strings := sort.StringSlice{string(t[i].prefix), string(t[j].prefix)} + return strings.Less(0, 1) +} + +func (t tries) Swap(i, j int) { + t[i], t[j] = t[j], t[i] +} + +type sparseChildList struct { + children tries +} + +func newSparseChildList(maxChildrenPerSparseNode int) childList { + return &sparseChildList{ + children: make(tries, 0, maxChildrenPerSparseNode), + } +} + +func (list *sparseChildList) length() int { + return len(list.children) +} + +func (list *sparseChildList) head() *Trie { + return list.children[0] +} + +func (list *sparseChildList) add(child *Trie) childList { + // Search for an empty spot and insert the child if possible. + if len(list.children) != cap(list.children) { + list.children = append(list.children, child) + return list + } + + // Otherwise we have to transform to the dense list type. + return newDenseChildList(list, child) +} + +func (list *sparseChildList) remove(b byte) { + for i, node := range list.children { + if node.prefix[0] == b { + list.children[i] = list.children[len(list.children)-1] + list.children[len(list.children)-1] = nil + list.children = list.children[:len(list.children)-1] + return + } + } + + // This is not supposed to be reached. + panic("removing non-existent child") +} + +func (list *sparseChildList) replace(b byte, child *Trie) { + // Make a consistency check. + if p0 := child.prefix[0]; p0 != b { + panic(fmt.Errorf("child prefix mismatch: %v != %v", p0, b)) + } + + // Seek the child and replace it. + for i, node := range list.children { + if node.prefix[0] == b { + list.children[i] = child + return + } + } +} + +func (list *sparseChildList) next(b byte) *Trie { + for _, child := range list.children { + if child.prefix[0] == b { + return child + } + } + return nil +} + +func (list *sparseChildList) walk(prefix *Prefix, visitor VisitorFunc) error { + + sort.Sort(list.children) + + for _, child := range list.children { + *prefix = append(*prefix, child.prefix...) 
+ if child.item != nil { + err := visitor(*prefix, child.item) + if err != nil { + if err == SkipSubtree { + *prefix = (*prefix)[:len(*prefix)-len(child.prefix)] + continue + } + *prefix = (*prefix)[:len(*prefix)-len(child.prefix)] + return err + } + } + + err := child.children.walk(prefix, visitor) + *prefix = (*prefix)[:len(*prefix)-len(child.prefix)] + if err != nil { + return err + } + } + + return nil +} + +func (list *sparseChildList) total() int { + tot := 0 + for _, child := range list.children { + if child != nil { + tot = tot + child.total() + } + } + return tot +} + +func (list *sparseChildList) print(w io.Writer, indent int) { + for _, child := range list.children { + if child != nil { + child.print(w, indent) + } + } +} + +type denseChildList struct { + min int + max int + numChildren int + headIndex int + children []*Trie +} + +func newDenseChildList(list *sparseChildList, child *Trie) childList { + var ( + min int = 255 + max int = 0 + ) + for _, child := range list.children { + b := int(child.prefix[0]) + if b < min { + min = b + } + if b > max { + max = b + } + } + + b := int(child.prefix[0]) + if b < min { + min = b + } + if b > max { + max = b + } + + children := make([]*Trie, max-min+1) + for _, child := range list.children { + children[int(child.prefix[0])-min] = child + } + children[int(child.prefix[0])-min] = child + + return &denseChildList{ + min: min, + max: max, + numChildren: list.length() + 1, + headIndex: 0, + children: children, + } +} + +func (list *denseChildList) length() int { + return list.numChildren +} + +func (list *denseChildList) head() *Trie { + return list.children[list.headIndex] +} + +func (list *denseChildList) add(child *Trie) childList { + b := int(child.prefix[0]) + var i int + + switch { + case list.min <= b && b <= list.max: + if list.children[b-list.min] != nil { + panic("dense child list collision detected") + } + i = b - list.min + list.children[i] = child + + case b < list.min: + children := make([]*Trie, list.max-b+1) + i = 0 + children[i] = child + copy(children[list.min-b:], list.children) + list.children = children + list.min = b + + default: // b > list.max + children := make([]*Trie, b-list.min+1) + i = b - list.min + children[i] = child + copy(children, list.children) + list.children = children + list.max = b + } + + list.numChildren++ + if i < list.headIndex { + list.headIndex = i + } + return list +} + +func (list *denseChildList) remove(b byte) { + i := int(b) - list.min + if list.children[i] == nil { + // This is not supposed to be reached. + panic("removing non-existent child") + } + list.numChildren-- + list.children[i] = nil + + // Update head index. + if i == list.headIndex { + for ; i < len(list.children); i++ { + if list.children[i] != nil { + list.headIndex = i + return + } + } + } +} + +func (list *denseChildList) replace(b byte, child *Trie) { + // Make a consistency check. + if p0 := child.prefix[0]; p0 != b { + panic(fmt.Errorf("child prefix mismatch: %v != %v", p0, b)) + } + + // Replace the child. + list.children[int(b)-list.min] = child +} + +func (list *denseChildList) next(b byte) *Trie { + i := int(b) + if i < list.min || list.max < i { + return nil + } + return list.children[i-list.min] +} + +func (list *denseChildList) walk(prefix *Prefix, visitor VisitorFunc) error { + for _, child := range list.children { + if child == nil { + continue + } + *prefix = append(*prefix, child.prefix...) 
+		if child.item != nil {
+			if err := visitor(*prefix, child.item); err != nil {
+				if err == SkipSubtree {
+					*prefix = (*prefix)[:len(*prefix)-len(child.prefix)]
+					continue
+				}
+				*prefix = (*prefix)[:len(*prefix)-len(child.prefix)]
+				return err
+			}
+		}
+
+		err := child.children.walk(prefix, visitor)
+		*prefix = (*prefix)[:len(*prefix)-len(child.prefix)]
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (list *denseChildList) print(w io.Writer, indent int) {
+	for _, child := range list.children {
+		if child != nil {
+			child.print(w, indent)
+		}
+	}
+}
+
+func (list *denseChildList) total() int {
+	tot := 0
+	for _, child := range list.children {
+		if child != nil {
+			tot = tot + child.total()
+		}
+	}
+	return tot
+}
diff --git a/vendor/github.com/tchap/go-patricia/patricia/patricia.go b/vendor/github.com/tchap/go-patricia/patricia/patricia.go
new file mode 100644
index 0000000..a1fc53d
--- /dev/null
+++ b/vendor/github.com/tchap/go-patricia/patricia/patricia.go
@@ -0,0 +1,594 @@
+// Copyright (c) 2014 The go-patricia AUTHORS
+//
+// Use of this source code is governed by The MIT License
+// that can be found in the LICENSE file.
+
+package patricia
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"strings"
+)
+
+//------------------------------------------------------------------------------
+// Trie
+//------------------------------------------------------------------------------
+
+const (
+	DefaultMaxPrefixPerNode         = 10
+	DefaultMaxChildrenPerSparseNode = 8
+)
+
+type (
+	Prefix      []byte
+	Item        interface{}
+	VisitorFunc func(prefix Prefix, item Item) error
+)
+
+// Trie is a generic patricia trie that allows fast retrieval of items by prefix,
+// and other funky stuff.
+//
+// Trie is not thread-safe.
+type Trie struct {
+	prefix Prefix
+	item   Item
+
+	maxPrefixPerNode         int
+	maxChildrenPerSparseNode int
+
+	children childList
+}
+
+// Public API ------------------------------------------------------------------
+
+type Option func(*Trie)
+
+// NewTrie is the Trie constructor.
+func NewTrie(options ...Option) *Trie {
+	trie := &Trie{}
+
+	for _, opt := range options {
+		opt(trie)
+	}
+
+	if trie.maxPrefixPerNode <= 0 {
+		trie.maxPrefixPerNode = DefaultMaxPrefixPerNode
+	}
+	if trie.maxChildrenPerSparseNode <= 0 {
+		trie.maxChildrenPerSparseNode = DefaultMaxChildrenPerSparseNode
+	}
+
+	trie.children = newSparseChildList(trie.maxChildrenPerSparseNode)
+	return trie
+}
+
+func MaxPrefixPerNode(value int) Option {
+	return func(trie *Trie) {
+		trie.maxPrefixPerNode = value
+	}
+}
+
+func MaxChildrenPerSparseNode(value int) Option {
+	return func(trie *Trie) {
+		trie.maxChildrenPerSparseNode = value
+	}
+}
+
+// Item returns the item stored in the root of this trie.
+func (trie *Trie) Item() Item {
+	return trie.item
+}
+
+// Insert inserts a new item into the trie using the given prefix. Insert does
+// not replace existing items. It returns false if an item was already in place.
+func (trie *Trie) Insert(key Prefix, item Item) (inserted bool) {
+	return trie.put(key, item, false)
+}
+
+// Set works much like Insert, but it always sets the item, possibly replacing
+// the item previously inserted.
+func (trie *Trie) Set(key Prefix, item Item) {
+	trie.put(key, item, true)
+}
+
+// Get returns the item located at key.
+//
+// This method is a bit dangerous, because Get can end up in an internal
+// node that does not really represent any user-defined value. So when nil is
+// used as a valid value, it is not possible to tell whether the value was
+// inserted into the tree by the user or not. A possible workaround is not to
+// use the nil interface as a valid value; even the zero value of any type is
+// enough to prevent this bad behaviour.
+func (trie *Trie) Get(key Prefix) (item Item) {
+	_, node, found, leftover := trie.findSubtree(key)
+	if !found || len(leftover) != 0 {
+		return nil
+	}
+	return node.item
+}
+
+// Match returns what Get(prefix) != nil would return. The same warning as for
+// Get applies here as well.
+func (trie *Trie) Match(prefix Prefix) (matchedExactly bool) {
+	return trie.Get(prefix) != nil
+}
+
+// MatchSubtree returns true when there is a subtree representing extensions
+// to key, that is if there are any keys in the tree which have key as prefix.
+func (trie *Trie) MatchSubtree(key Prefix) (matched bool) {
+	_, _, matched, _ = trie.findSubtree(key)
+	return
+}
+
+// Visit calls visitor on every node containing a non-nil item
+// in alphabetical order.
+//
+// If an error is returned from visitor, the function stops visiting the tree
+// and returns that error, unless it is a special error - SkipSubtree. In that
+// case Visit skips the subtree represented by the current node and continues
+// elsewhere.
+func (trie *Trie) Visit(visitor VisitorFunc) error {
+	return trie.walk(nil, visitor)
+}
+
+func (trie *Trie) size() int {
+	n := 0
+
+	trie.walk(nil, func(prefix Prefix, item Item) error {
+		n++
+		return nil
+	})
+
+	return n
+}
+
+func (trie *Trie) total() int {
+	return 1 + trie.children.total()
+}
+
+// VisitSubtree works much like Visit, but it only visits nodes matching prefix.
+func (trie *Trie) VisitSubtree(prefix Prefix, visitor VisitorFunc) error {
+	// Nil prefix not allowed.
+	if prefix == nil {
+		panic(ErrNilPrefix)
+	}
+
+	// Empty trie must be handled explicitly.
+	if trie.prefix == nil {
+		return nil
+	}
+
+	// Locate the relevant subtree.
+	_, root, found, leftover := trie.findSubtree(prefix)
+	if !found {
+		return nil
+	}
+	prefix = append(prefix, leftover...)
+
+	// Visit it.
+	return root.walk(prefix, visitor)
+}
+
+// VisitPrefixes visits only nodes that represent prefixes of key.
+// To say the obvious, returning SkipSubtree from visitor makes no sense here.
+func (trie *Trie) VisitPrefixes(key Prefix, visitor VisitorFunc) error {
+	// Nil key not allowed.
+	if key == nil {
+		panic(ErrNilPrefix)
+	}
+
+	// Empty trie must be handled explicitly.
+	if trie.prefix == nil {
+		return nil
+	}
+
+	// Walk the path matching key prefixes.
+	node := trie
+	prefix := key
+	offset := 0
+	for {
+		// Compute what part of prefix matches.
+		common := node.longestCommonPrefixLength(key)
+		key = key[common:]
+		offset += common
+
+		// Partial match means that there is no subtree matching prefix.
+		if common < len(node.prefix) {
+			return nil
+		}
+
+		// Call the visitor.
+		if item := node.item; item != nil {
+			if err := visitor(prefix[:offset], item); err != nil {
+				return err
+			}
+		}
+
+		if len(key) == 0 {
+			// This node represents key, we are finished.
+			return nil
+		}
+
+		// There is some key suffix left, move to the children.
+		child := node.children.next(key[0])
+		if child == nil {
+			// There is nowhere to continue, return.
+			return nil
+		}
+
+		node = child
+	}
+}
+
+// Delete deletes the item represented by the given prefix.
+//
+// True is returned if the matching node was found and deleted.
+func (trie *Trie) Delete(key Prefix) (deleted bool) {
+	// Nil prefix not allowed.
+	if key == nil {
+		panic(ErrNilPrefix)
+	}
+
+	// Empty trie must be handled explicitly.
+	if trie.prefix == nil {
+		return false
+	}
+
+	// Find the relevant node.
+	path, found, _ := trie.findSubtreePath(key)
+	if !found {
+		return false
+	}
+
+	node := path[len(path)-1]
+	var parent *Trie
+	if len(path) != 1 {
+		parent = path[len(path)-2]
+	}
+
+	// If the item is already set to nil, there is nothing to do.
+	if node.item == nil {
+		return false
+	}
+
+	// Delete the item.
+	node.item = nil
+
+	// Initialise i before goto.
+	// Will be used later in a loop.
+	i := len(path) - 1
+
+	// In case there are some child nodes, we cannot drop the whole subtree.
+	// We can try to compact nodes, though.
+	if node.children.length() != 0 {
+		goto Compact
+	}
+
+	// In case we are at the root, just reset it and we are done.
+	if parent == nil {
+		node.reset()
+		return true
+	}
+
+	// We can drop a subtree.
+	// Find the first ancestor that has its value set or has 2 or more child nodes.
+	// That will be the node where to drop the subtree at.
+	for ; i >= 0; i-- {
+		if current := path[i]; current.item != nil || current.children.length() >= 2 {
+			break
+		}
+	}
+
+	// Handle the case when there is no such node.
+	// In other words, we can reset the whole tree.
+	if i == -1 {
+		path[0].reset()
+		return true
+	}
+
+	// We can just remove the subtree here.
+	node = path[i]
+	if i == 0 {
+		parent = nil
+	} else {
+		parent = path[i-1]
+	}
+	// i+1 is always a valid index since i is never pointing to the last node.
+	// The loop above skips at least the last node since we are sure that the item
+	// is set to nil and it has no children, otherwise we would be compacting instead.
+	node.children.remove(path[i+1].prefix[0])
+
+Compact:
+	// The node is set to the first non-empty ancestor,
+	// so try to compact since that might be possible now.
+	if compacted := node.compact(); compacted != node {
+		if parent == nil {
+			*node = *compacted
+		} else {
+			parent.children.replace(node.prefix[0], compacted)
+			*parent = *parent.compact()
+		}
+	}
+
+	return true
+}
+
+// DeleteSubtree finds the subtree exactly matching prefix and deletes it.
+//
+// True is returned if the subtree was found and deleted.
+func (trie *Trie) DeleteSubtree(prefix Prefix) (deleted bool) {
+	// Nil prefix not allowed.
+	if prefix == nil {
+		panic(ErrNilPrefix)
+	}
+
+	// Empty trie must be handled explicitly.
+	if trie.prefix == nil {
+		return false
+	}
+
+	// Locate the relevant subtree.
+	parent, root, found, _ := trie.findSubtree(prefix)
+	if !found {
+		return false
+	}
+
+	// If we are in the root of the trie, reset the trie.
+	if parent == nil {
+		root.reset()
+		return true
+	}
+
+	// Otherwise remove the root node from its parent.
+	parent.children.remove(root.prefix[0])
+	return true
+}
+
+// Internal helper methods -----------------------------------------------------
+
+func (trie *Trie) empty() bool {
+	return trie.item == nil && trie.children.length() == 0
+}
+
+func (trie *Trie) reset() {
+	trie.prefix = nil
+	// Recreate the sparse child list with the configured sparse node capacity.
+	trie.children = newSparseChildList(trie.maxChildrenPerSparseNode)
+}
+
+func (trie *Trie) put(key Prefix, item Item, replace bool) (inserted bool) {
+	// Nil prefix not allowed.
+ if key == nil { + panic(ErrNilPrefix) + } + + var ( + common int + node *Trie = trie + child *Trie + ) + + if node.prefix == nil { + if len(key) <= trie.maxPrefixPerNode { + node.prefix = key + goto InsertItem + } + node.prefix = key[:trie.maxPrefixPerNode] + key = key[trie.maxPrefixPerNode:] + goto AppendChild + } + + for { + // Compute the longest common prefix length. + common = node.longestCommonPrefixLength(key) + key = key[common:] + + // Only a part matches, split. + if common < len(node.prefix) { + goto SplitPrefix + } + + // common == len(node.prefix) since never (common > len(node.prefix)) + // common == len(former key) <-> 0 == len(key) + // -> former key == node.prefix + if len(key) == 0 { + goto InsertItem + } + + // Check children for matching prefix. + child = node.children.next(key[0]) + if child == nil { + goto AppendChild + } + node = child + } + +SplitPrefix: + // Split the prefix if necessary. + child = new(Trie) + *child = *node + *node = *NewTrie() + node.prefix = child.prefix[:common] + child.prefix = child.prefix[common:] + child = child.compact() + node.children = node.children.add(child) + +AppendChild: + // Keep appending children until whole prefix is inserted. + // This loop starts with empty node.prefix that needs to be filled. + for len(key) != 0 { + child := NewTrie() + if len(key) <= trie.maxPrefixPerNode { + child.prefix = key + node.children = node.children.add(child) + node = child + goto InsertItem + } else { + child.prefix = key[:trie.maxPrefixPerNode] + key = key[trie.maxPrefixPerNode:] + node.children = node.children.add(child) + node = child + } + } + +InsertItem: + // Try to insert the item if possible. + if replace || node.item == nil { + node.item = item + return true + } + return false +} + +func (trie *Trie) compact() *Trie { + // Only a node with a single child can be compacted. + if trie.children.length() != 1 { + return trie + } + + child := trie.children.head() + + // If any item is set, we cannot compact since we want to retain + // the ability to do searching by key. This makes compaction less usable, + // but that simply cannot be avoided. + if trie.item != nil || child.item != nil { + return trie + } + + // Make sure the combined prefixes fit into a single node. + if len(trie.prefix)+len(child.prefix) > trie.maxPrefixPerNode { + return trie + } + + // Concatenate the prefixes, move the items. + child.prefix = append(trie.prefix, child.prefix...) + if trie.item != nil { + child.item = trie.item + } + + return child +} + +func (trie *Trie) findSubtree(prefix Prefix) (parent *Trie, root *Trie, found bool, leftover Prefix) { + // Find the subtree matching prefix. + root = trie + for { + // Compute what part of prefix matches. + common := root.longestCommonPrefixLength(prefix) + prefix = prefix[common:] + + // We used up the whole prefix, subtree found. + if len(prefix) == 0 { + found = true + leftover = root.prefix[common:] + return + } + + // Partial match means that there is no subtree matching prefix. + if common < len(root.prefix) { + leftover = root.prefix[common:] + return + } + + // There is some prefix left, move to the children. + child := root.children.next(prefix[0]) + if child == nil { + // There is nowhere to continue, there is no subtree matching prefix. + return + } + + parent = root + root = child + } +} + +func (trie *Trie) findSubtreePath(prefix Prefix) (path []*Trie, found bool, leftover Prefix) { + // Find the subtree matching prefix. 
+	root := trie
+	var subtreePath []*Trie
+	for {
+		// Append the current root to the path.
+		subtreePath = append(subtreePath, root)
+
+		// Compute what part of prefix matches.
+		common := root.longestCommonPrefixLength(prefix)
+		prefix = prefix[common:]
+
+		// We used up the whole prefix, subtree found.
+		if len(prefix) == 0 {
+			path = subtreePath
+			found = true
+			leftover = root.prefix[common:]
+			return
+		}
+
+		// Partial match means that there is no subtree matching prefix.
+		if common < len(root.prefix) {
+			leftover = root.prefix[common:]
+			return
+		}
+
+		// There is some prefix left, move to the children.
+		child := root.children.next(prefix[0])
+		if child == nil {
+			// There is nowhere to continue, there is no subtree matching prefix.
+			return
+		}
+
+		root = child
+	}
+}
+
+func (trie *Trie) walk(actualRootPrefix Prefix, visitor VisitorFunc) error {
+	var prefix Prefix
+	// Allocate a bit more space for prefix at the beginning.
+	if actualRootPrefix == nil {
+		prefix = make(Prefix, 32+len(trie.prefix))
+		copy(prefix, trie.prefix)
+		prefix = prefix[:len(trie.prefix)]
+	} else {
+		prefix = make(Prefix, 32+len(actualRootPrefix))
+		copy(prefix, actualRootPrefix)
+		prefix = prefix[:len(actualRootPrefix)]
+	}
+
+	// Visit the root first. Note that this works for an empty trie as well since
+	// in that case item == nil && len(children) == 0.
+	if trie.item != nil {
+		if err := visitor(prefix, trie.item); err != nil {
+			if err == SkipSubtree {
+				return nil
+			}
+			return err
+		}
+	}
+
+	// Then continue to the children.
+	return trie.children.walk(&prefix, visitor)
+}
+
+func (trie *Trie) longestCommonPrefixLength(prefix Prefix) (i int) {
+	for ; i < len(prefix) && i < len(trie.prefix) && prefix[i] == trie.prefix[i]; i++ {
+	}
+	return
+}
+
+func (trie *Trie) dump() string {
+	writer := &bytes.Buffer{}
+	trie.print(writer, 0)
+	return writer.String()
+}
+
+func (trie *Trie) print(writer io.Writer, indent int) {
+	fmt.Fprintf(writer, "%s%s %v\n", strings.Repeat(" ", indent), string(trie.prefix), trie.item)
+	trie.children.print(writer, indent+2)
+}
+
+// Errors ----------------------------------------------------------------------
+
+var (
+	SkipSubtree  = errors.New("Skip this subtree")
+	ErrNilPrefix = errors.New("Nil prefix passed into a method call")
+)
diff --git a/vendor/github.com/vbauerster/mpb/LICENSE b/vendor/github.com/vbauerster/mpb/LICENSE
new file mode 100644
index 0000000..5e68ed2
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/LICENSE
@@ -0,0 +1,29 @@
+BSD 3-Clause License
+
+Copyright (C) 2016-2018 Vladimir Bauer
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+  list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+  this list of conditions and the following disclaimer in the documentation
+  and/or other materials provided with the distribution.
+
+* Neither the name of the copyright holder nor the names of its
+  contributors may be used to endorse or promote products derived from
+  this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/vbauerster/mpb/README.md b/vendor/github.com/vbauerster/mpb/README.md
new file mode 100644
index 0000000..9b76064
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/README.md
@@ -0,0 +1,117 @@
+# Multi Progress Bar
+
+[![GoDoc](https://godoc.org/github.com/vbauerster/mpb?status.svg)](https://godoc.org/github.com/vbauerster/mpb)
+[![Build Status](https://travis-ci.org/vbauerster/mpb.svg?branch=master)](https://travis-ci.org/vbauerster/mpb)
+[![Go Report Card](https://goreportcard.com/badge/github.com/vbauerster/mpb)](https://goreportcard.com/report/github.com/vbauerster/mpb)
+[![codecov](https://codecov.io/gh/vbauerster/mpb/branch/master/graph/badge.svg)](https://codecov.io/gh/vbauerster/mpb)
+
+**mpb** is a Go library for rendering progress bars in terminal applications.
+
+## Features
+
+* __Multiple Bars__: Multiple progress bars are supported
+* __Dynamic Total__: [Set total](https://github.com/vbauerster/mpb/issues/9#issuecomment-344448984) while the bar is running
+* __Dynamic Add/Remove__: Dynamically add or remove bars
+* __Cancellation__: Cancel the whole rendering process
+* __Predefined Decorators__: Elapsed time, [ewma](https://github.com/VividCortex/ewma) based ETA, Percentage, Bytes counter
+* __Decorator width sync__: Synchronized decorator width among multiple bars
+
+## Installation
+
+```sh
+go get github.com/vbauerster/mpb
+```
+
+_Note:_ it is preferable to go get from github.com, rather than gopkg.in. See issue [#11](https://github.com/vbauerster/mpb/issues/11).
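+
+mpb can also track io progress by wrapping a reader. A minimal sketch, using
+only calls that appear in this vendored snapshot (`mpb.New`, `p.AddBar`,
+`Bar.ProxyReader`, `decor.Percentage`, `p.Wait`); error handling is elided:
+
+```go
+package main
+
+import (
+	"io"
+	"io/ioutil"
+	"strings"
+
+	"github.com/vbauerster/mpb"
+	"github.com/vbauerster/mpb/decor"
+)
+
+func main() {
+	p := mpb.New()
+	src := strings.NewReader(strings.Repeat("x", 1024))
+	bar := p.AddBar(1024, mpb.AppendDecorators(decor.Percentage()))
+
+	// Every Read on the proxy advances the bar by the number of bytes read.
+	r := bar.ProxyReader(src)
+	io.Copy(ioutil.Discard, r)
+
+	// Wait for the bar to complete and flush.
+	p.Wait()
+}
+```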
+ +## Usage + +#### [Rendering single bar](examples/singleBar/main.go) +```go + p := mpb.New( + // override default (80) width + mpb.WithWidth(64), + // override default "[=>-]" format + mpb.WithFormat("╢▌▌░╟"), + // override default 120ms refresh rate + mpb.WithRefreshRate(180*time.Millisecond), + ) + + total := 100 + name := "Single Bar:" + // adding a single bar + bar := p.AddBar(int64(total), + mpb.PrependDecorators( + // display our name with one space on the right + decor.Name(name, decor.WC{W: len(name) + 1, C: decor.DidentRight}), + // replace ETA decorator with "done" message, OnComplete event + decor.OnComplete( + // ETA decorator with ewma age of 60, and width reservation of 4 + decor.EwmaETA(decor.ET_STYLE_GO, 60, decor.WC{W: 4}), "done", + ), + ), + mpb.AppendDecorators(decor.Percentage()), + ) + // simulating some work + max := 100 * time.Millisecond + for i := 0; i < total; i++ { + start := time.Now() + time.Sleep(time.Duration(rand.Intn(10)+1) * max / 10) + // ewma based decorators require work duration measurement + bar.IncrBy(1, time.Since(start)) + } + // wait for our bar to complete and flush + p.Wait() +``` + +#### [Rendering multiple bars](examples/simple/main.go) +```go + var wg sync.WaitGroup + p := mpb.New(mpb.WithWaitGroup(&wg)) + total, numBars := 100, 3 + wg.Add(numBars) + + for i := 0; i < numBars; i++ { + name := fmt.Sprintf("Bar#%d:", i) + bar := p.AddBar(int64(total), + mpb.PrependDecorators( + // simple name decorator + decor.Name(name), + // decor.DSyncWidth bit enables column width synchronization + decor.Percentage(decor.WCSyncSpace), + ), + mpb.AppendDecorators( + // replace ETA decorator with "done" message, OnComplete event + decor.OnComplete( + // ETA decorator with ewma age of 60 + decor.EwmaETA(decor.ET_STYLE_GO, 60), "done", + ), + ), + ) + // simulating some work + go func() { + defer wg.Done() + max := 100 * time.Millisecond + for i := 0; i < total; i++ { + start := time.Now() + time.Sleep(time.Duration(rand.Intn(10)+1) * max / 10) + // ewma based decorators require work duration measurement + bar.IncrBy(1, time.Since(start)) + } + }() + } + // wait for all bars to complete and flush + p.Wait() +``` + +#### [Dynamic total](examples/dynTotal/main.go) + +![dynamic total](examples/gifs/godEMrCZmJkHYH1X9dN4Nm0U7.svg) + +#### [Complex example](examples/complex/main.go) + +![complex](examples/gifs/wHzf1M7sd7B3zVa2scBMnjqRf.svg) + +#### [Bytes counters](examples/io/single/main.go) + +![byte counters](examples/gifs/hIpTa3A5rQz65ssiVuRJu87X6.svg) diff --git a/vendor/github.com/vbauerster/mpb/bar.go b/vendor/github.com/vbauerster/mpb/bar.go new file mode 100644 index 0000000..5a506fc --- /dev/null +++ b/vendor/github.com/vbauerster/mpb/bar.go @@ -0,0 +1,455 @@ +package mpb + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "strings" + "sync" + "time" + "unicode/utf8" + + "github.com/vbauerster/mpb/decor" + "github.com/vbauerster/mpb/internal" +) + +const ( + rLeft = iota + rFill + rTip + rEmpty + rRight +) + +const formatLen = 5 + +type barRunes [formatLen]rune + +// Bar represents a progress Bar +type Bar struct { + priority int + index int + + runningBar *Bar + cacheState *bState + operateState chan func(*bState) + int64Ch chan int64 + boolCh chan bool + frameReaderCh chan *frameReader + syncTableCh chan [][]chan int + + // done is closed by Bar's goroutine, after cacheState is written + done chan struct{} + // shutdown is closed from master Progress goroutine only + shutdown chan struct{} +} + +type ( + bState struct { + id int + width int + total 
int64
+		current            int64
+		runes              barRunes
+		trimLeftSpace      bool
+		trimRightSpace     bool
+		toComplete         bool
+		removeOnComplete   bool
+		barClearOnComplete bool
+		completeFlushed    bool
+		aDecorators        []decor.Decorator
+		pDecorators        []decor.Decorator
+		amountReceivers    []decor.AmountReceiver
+		shutdownListeners  []decor.ShutdownListener
+		refill             *refill
+		bufP, bufB, bufA   *bytes.Buffer
+		bufNL              *bytes.Buffer
+		panicMsg           string
+		newLineExtendFn    func(io.Writer, *decor.Statistics)
+
+		// following options are assigned to the *Bar
+		priority   int
+		runningBar *Bar
+	}
+	refill struct {
+		char rune
+		till int64
+	}
+	frameReader struct {
+		io.Reader
+		extendedLines    int
+		toShutdown       bool
+		removeOnComplete bool
+	}
+)
+
+func newBar(wg *sync.WaitGroup, id int, total int64, cancel <-chan struct{}, options ...BarOption) *Bar {
+	if total <= 0 {
+		total = time.Now().Unix()
+	}
+
+	s := &bState{
+		id:       id,
+		priority: id,
+		total:    total,
+	}
+
+	for _, opt := range options {
+		if opt != nil {
+			opt(s)
+		}
+	}
+
+	s.bufP = bytes.NewBuffer(make([]byte, 0, s.width))
+	s.bufB = bytes.NewBuffer(make([]byte, 0, s.width))
+	s.bufA = bytes.NewBuffer(make([]byte, 0, s.width))
+
+	b := &Bar{
+		priority:      s.priority,
+		runningBar:    s.runningBar,
+		operateState:  make(chan func(*bState)),
+		int64Ch:       make(chan int64),
+		boolCh:        make(chan bool),
+		frameReaderCh: make(chan *frameReader, 1),
+		syncTableCh:   make(chan [][]chan int),
+		done:          make(chan struct{}),
+		shutdown:      make(chan struct{}),
+	}
+
+	if b.runningBar != nil {
+		b.priority = b.runningBar.priority
+	}
+
+	if s.newLineExtendFn != nil {
+		s.bufNL = bytes.NewBuffer(make([]byte, 0, s.width))
+	}
+
+	go b.serve(wg, s, cancel)
+	return b
+}
+
+// RemoveAllPrependers removes all prepend functions.
+func (b *Bar) RemoveAllPrependers() {
+	select {
+	case b.operateState <- func(s *bState) { s.pDecorators = nil }:
+	case <-b.done:
+	}
+}
+
+// RemoveAllAppenders removes all append functions.
+func (b *Bar) RemoveAllAppenders() {
+	select {
+	case b.operateState <- func(s *bState) { s.aDecorators = nil }:
+	case <-b.done:
+	}
+}
+
+// ProxyReader wraps r with metrics required for progress tracking.
+func (b *Bar) ProxyReader(r io.Reader) io.ReadCloser {
+	if r == nil {
+		panic("expect io.Reader, got nil")
+	}
+	rc, ok := r.(io.ReadCloser)
+	if !ok {
+		rc = ioutil.NopCloser(r)
+	}
+	return &proxyReader{rc, b, time.Now()}
+}
+
+// ID returns the id of the bar.
+func (b *Bar) ID() int {
+	select {
+	case b.operateState <- func(s *bState) { b.int64Ch <- int64(s.id) }:
+		return int(<-b.int64Ch)
+	case <-b.done:
+		return b.cacheState.id
+	}
+}
+
+// Current returns the bar's current number, in other words the sum of all increments.
+func (b *Bar) Current() int64 {
+	select {
+	case b.operateState <- func(s *bState) { b.int64Ch <- s.current }:
+		return <-b.int64Ch
+	case <-b.done:
+		return b.cacheState.current
+	}
+}
+
+// SetTotal sets the total dynamically.
+// Set final to true when the total is known; it will trigger the bar complete event.
+func (b *Bar) SetTotal(total int64, final bool) bool {
+	select {
+	case b.operateState <- func(s *bState) {
+		if total > 0 {
+			s.total = total
+		}
+		if final {
+			s.current = s.total
+			s.toComplete = true
+		}
+	}:
+		return true
+	case <-b.done:
+		return false
+	}
+}
+
+// SetRefill sets the fill rune to r, up until position n.
+func (b *Bar) SetRefill(n int, r rune) {
+	if n <= 0 {
+		return
+	}
+	b.operateState <- func(s *bState) {
+		s.refill = &refill{r, int64(n)}
+	}
+}
+
+// RefillBy is deprecated, use SetRefill
+func (b *Bar) RefillBy(n int, r rune) {
+	b.SetRefill(n, r)
+}
+
+// Increment is a shorthand for b.IncrBy(1).
+func (b *Bar) Increment() {
+	b.IncrBy(1)
+}
+
+// IncrBy increments progress bar by amount of n.
+// wdd is an optional work duration, i.e. time.Since(start), which is
+// expected to be provided if any ewma based decorator is used.
+func (b *Bar) IncrBy(n int, wdd ...time.Duration) {
+	select {
+	case b.operateState <- func(s *bState) {
+		s.current += int64(n)
+		if s.current >= s.total {
+			s.current = s.total
+			s.toComplete = true
+		}
+		for _, ar := range s.amountReceivers {
+			ar.NextAmount(n, wdd...)
+		}
+	}:
+	case <-b.done:
+	}
+}
+
+// Completed reports whether the bar is in completed state.
+func (b *Bar) Completed() bool {
+	// select is omitted here, because the primary usage of the method is a loop
+	// condition, like for !bar.Completed() {...}
+	// so once toComplete=true it is called one last time (at which time, the bar
+	// is still alive), the loop then quits, and the method is not supposed to be
+	// called afterwards.
+	return <-b.boolCh
+}
+
+func (b *Bar) wSyncTable() [][]chan int {
+	select {
+	case b.operateState <- func(s *bState) { b.syncTableCh <- s.wSyncTable() }:
+		return <-b.syncTableCh
+	case <-b.done:
+		return b.cacheState.wSyncTable()
+	}
+}
+
+func (b *Bar) serve(wg *sync.WaitGroup, s *bState, cancel <-chan struct{}) {
+	defer wg.Done()
+	for {
+		select {
+		case op := <-b.operateState:
+			op(s)
+		case b.boolCh <- s.toComplete:
+		case <-cancel:
+			s.toComplete = true
+			cancel = nil
+		case <-b.shutdown:
+			b.cacheState = s
+			close(b.done)
+			for _, sl := range s.shutdownListeners {
+				sl.Shutdown()
+			}
+			return
+		}
+	}
+}
+
+func (b *Bar) render(debugOut io.Writer, tw int) {
+	select {
+	case b.operateState <- func(s *bState) {
+		defer func() {
+			// recover if a user defined decorator panics, for example
+			if p := recover(); p != nil {
+				s.panicMsg = fmt.Sprintf("panic: %v", p)
+				fmt.Fprintf(debugOut, "%s %s bar id %02d %v\n", "[mpb]", time.Now(), s.id, s.panicMsg)
+				b.frameReaderCh <- &frameReader{
+					Reader:     strings.NewReader(fmt.Sprintf(fmt.Sprintf("%%.%ds\n", tw), s.panicMsg)),
+					toShutdown: true,
+				}
+			}
+		}()
+		r := s.draw(tw)
+		var extendedLines int
+		if s.newLineExtendFn != nil {
+			s.bufNL.Reset()
+			s.newLineExtendFn(s.bufNL, newStatistics(s))
+			extendedLines = countLines(s.bufNL.Bytes())
+			r = io.MultiReader(r, s.bufNL)
+		}
+		b.frameReaderCh <- &frameReader{
+			Reader:           r,
+			extendedLines:    extendedLines,
+			toShutdown:       s.toComplete && !s.completeFlushed,
+			removeOnComplete: s.removeOnComplete,
+		}
+		s.completeFlushed = s.toComplete
+	}:
+	case <-b.done:
+		s := b.cacheState
+		r := s.draw(tw)
+		var extendedLines int
+		if s.newLineExtendFn != nil {
+			s.bufNL.Reset()
+			s.newLineExtendFn(s.bufNL, newStatistics(s))
+			extendedLines = countLines(s.bufNL.Bytes())
+			r = io.MultiReader(r, s.bufNL)
+		}
+		b.frameReaderCh <- &frameReader{
+			Reader:        r,
+			extendedLines: extendedLines,
+		}
+	}
+}
+
+func (s *bState) draw(termWidth int) io.Reader {
+	defer s.bufA.WriteByte('\n')
+
+	if s.panicMsg != "" {
+		return strings.NewReader(fmt.Sprintf(fmt.Sprintf("%%.%ds\n", termWidth), s.panicMsg))
+	}
+
+	stat := newStatistics(s)
+
+	for _, d := range s.pDecorators {
+		s.bufP.WriteString(d.Decor(stat))
+	}
+
+	for _, d := range s.aDecorators {
+		s.bufA.WriteString(d.Decor(stat))
+	}
+
+	prependCount := utf8.RuneCount(s.bufP.Bytes())
+	appendCount := utf8.RuneCount(s.bufA.Bytes())
+
+	if s.barClearOnComplete && s.completeFlushed {
+		return io.MultiReader(s.bufP, s.bufA)
+	}
+
+	s.fillBar(s.width)
+	barCount := utf8.RuneCount(s.bufB.Bytes())
+	totalCount := prependCount + barCount + appendCount
+	if spaceCount := 0; totalCount > termWidth {
+		if !s.trimLeftSpace {
+			spaceCount++
+		}
+		if !s.trimRightSpace {
+			spaceCount++
+		}
+		s.fillBar(termWidth - prependCount - appendCount - spaceCount)
+	}
+
+	return io.MultiReader(s.bufP, s.bufB, s.bufA)
+}
+
+func (s *bState) fillBar(width int) {
+	defer func() {
+		s.bufB.WriteRune(s.runes[rRight])
+		if !s.trimRightSpace {
+			s.bufB.WriteByte(' ')
+		}
+	}()
+
+	s.bufB.Reset()
+	if !s.trimLeftSpace {
+		s.bufB.WriteByte(' ')
+	}
+	s.bufB.WriteRune(s.runes[rLeft])
+	if width <= 2 {
+		return
+	}
+
+	// bar s.width without leftEnd and rightEnd runes
+	barWidth := width - 2
+
+	completedWidth := internal.Percentage(s.total, s.current, int64(barWidth))
+
+	if s.refill != nil {
+		till := internal.Percentage(s.total, s.refill.till, int64(barWidth))
+		// append refill rune
+		var i int64
+		for i = 0; i < till; i++ {
+			s.bufB.WriteRune(s.refill.char)
+		}
+		for i = till; i < completedWidth; i++ {
+			s.bufB.WriteRune(s.runes[rFill])
+		}
+	} else {
+		var i int64
+		for i = 0; i < completedWidth; i++ {
+			s.bufB.WriteRune(s.runes[rFill])
+		}
+	}
+
+	if completedWidth < int64(barWidth) && completedWidth > 0 {
+		_, size := utf8.DecodeLastRune(s.bufB.Bytes())
+		s.bufB.Truncate(s.bufB.Len() - size)
+		s.bufB.WriteRune(s.runes[rTip])
+	}
+
+	for i := completedWidth; i < int64(barWidth); i++ {
+		s.bufB.WriteRune(s.runes[rEmpty])
+	}
+}
+
+func (s *bState) wSyncTable() [][]chan int {
+	columns := make([]chan int, 0, len(s.pDecorators)+len(s.aDecorators))
+	var pCount int
+	for _, d := range s.pDecorators {
+		if ok, ch := d.Syncable(); ok {
+			columns = append(columns, ch)
+			pCount++
+		}
+	}
+	var aCount int
+	for _, d := range s.aDecorators {
+		if ok, ch := d.Syncable(); ok {
+			columns = append(columns, ch)
+			aCount++
+		}
+	}
+	table := make([][]chan int, 2)
+	table[0] = columns[0:pCount]
+	table[1] = columns[pCount : pCount+aCount : pCount+aCount]
+	return table
+}
+
+func newStatistics(s *bState) *decor.Statistics {
+	return &decor.Statistics{
+		ID:        s.id,
+		Completed: s.completeFlushed,
+		Total:     s.total,
+		Current:   s.current,
+	}
+}
+
+func strToBarRunes(format string) (array barRunes) {
+	for i, n := 0, 0; len(format) > 0; i++ {
+		array[i], n = utf8.DecodeRuneInString(format)
+		format = format[n:]
+	}
+	return
+}
+
+func countLines(b []byte) int {
+	return bytes.Count(b, []byte("\n"))
+}
diff --git a/vendor/github.com/vbauerster/mpb/bar_option.go b/vendor/github.com/vbauerster/mpb/bar_option.go
new file mode 100644
index 0000000..e33bce4
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/bar_option.go
@@ -0,0 +1,124 @@
+package mpb
+
+import (
+	"io"
+
+	"github.com/vbauerster/mpb/decor"
+)
+
+// BarOption is a function option which changes the default behavior of a bar,
+// if passed to p.AddBar(int64, ...BarOption)
+type BarOption func(*bState)
+
+// AppendDecorators lets you inject decorators to the bar's right side
+func AppendDecorators(appenders ...decor.Decorator) BarOption {
+	return func(s *bState) {
+		for _, decorator := range appenders {
+			if ar, ok := decorator.(decor.AmountReceiver); ok {
+				s.amountReceivers = append(s.amountReceivers, ar)
+			}
+			if sl, ok := decorator.(decor.ShutdownListener); ok {
+				s.shutdownListeners = append(s.shutdownListeners, sl)
+			}
+			s.aDecorators = append(s.aDecorators, decorator)
+		}
+	}
+}
+
+// PrependDecorators lets you inject decorators to the bar's left side
+func PrependDecorators(prependers ...decor.Decorator) BarOption {
+	return func(s *bState) {
+		for _, decorator := range prependers {
+			if ar, ok := decorator.(decor.AmountReceiver); ok {
+				s.amountReceivers = append(s.amountReceivers, ar)
+			}
+			if sl, ok := decorator.(decor.ShutdownListener); ok {
+				s.shutdownListeners = append(s.shutdownListeners, sl)
+			}
+			s.pDecorators = append(s.pDecorators, decorator)
+		}
+	}
+}
+
+// BarTrimLeft trims left side space of the bar
+func BarTrimLeft() BarOption {
+	return func(s *bState) {
+		s.trimLeftSpace = true
+	}
+}
+
+// BarTrimRight trims right side space of the bar
+func BarTrimRight() BarOption {
+	return func(s *bState) {
+		s.trimRightSpace = true
+	}
+}
+
+// BarTrim trims both left and right spaces of the bar
+func BarTrim() BarOption {
+	return func(s *bState) {
+		s.trimLeftSpace = true
+		s.trimRightSpace = true
+	}
+}
+
+// BarID overwrites internal bar id
+func BarID(id int) BarOption {
+	return func(s *bState) {
+		s.id = id
+	}
+}
+
+// BarRemoveOnComplete is a flag; if set, the whole bar line will be removed
+// on complete event. If both BarRemoveOnComplete and BarClearOnComplete are
+// set, first the bar section gets cleared and then the whole bar line gets
+// removed completely.
+func BarRemoveOnComplete() BarOption {
+	return func(s *bState) {
+		s.removeOnComplete = true
+	}
+}
+
+// BarReplaceOnComplete is an indicator for delayed bar start, after the
+// `runningBar` is complete. To achieve the bar replacement effect,
+// `runningBar` should have its `BarRemoveOnComplete` option set.
+func BarReplaceOnComplete(runningBar *Bar) BarOption {
+	return func(s *bState) {
+		s.runningBar = runningBar
+	}
+}
+
+// BarClearOnComplete is a flag; if set, the bar section will be cleared on
+// complete event. If you need to remove the whole bar line, refer to
+// BarRemoveOnComplete.
+func BarClearOnComplete() BarOption {
+	return func(s *bState) {
+		s.barClearOnComplete = true
+	}
+}
+
+// BarPriority sets bar's priority.
+// Zero is the highest priority, i.e. the bar will be on top.
+// If `BarReplaceOnComplete` option is supplied, this option is ignored.
+func BarPriority(priority int) BarOption {
+	return func(s *bState) {
+		s.priority = priority
+	}
+}
+
+// BarNewLineExtend takes a user defined efn, which gets called on each render
+// cycle. Anything written to efn's writer will appear on a new line of the
+// respective bar.
+func BarNewLineExtend(efn func(io.Writer, *decor.Statistics)) BarOption {
+	return func(s *bState) {
+		s.newLineExtendFn = efn
+	}
+}
+
+func barWidth(w int) BarOption {
+	return func(s *bState) {
+		s.width = w
+	}
+}
+
+func barFormat(format string) BarOption {
+	return func(s *bState) {
+		s.runes = strToBarRunes(format)
+	}
+}
diff --git a/vendor/github.com/vbauerster/mpb/cwriter/writer.go b/vendor/github.com/vbauerster/mpb/cwriter/writer.go
new file mode 100644
index 0000000..0b1470d
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/cwriter/writer.go
@@ -0,0 +1,78 @@
+package cwriter
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+
+	isatty "github.com/mattn/go-isatty"
+	"golang.org/x/crypto/ssh/terminal"
+)
+
+// ESC is the ASCII code for escape character
const ESC = 27
+
+var NotATTY = errors.New("not a terminal")
+
+var (
+	cursorUp           = fmt.Sprintf("%c[%dA", ESC, 1)
+	clearLine          = fmt.Sprintf("%c[2K\r", ESC)
+	clearCursorAndLine = cursorUp + clearLine
+)
+
+// Writer is a buffered writer that updates the terminal.
+// The contents of writer will be flushed when Flush is called. +type Writer struct { + out io.Writer + buf bytes.Buffer + isTerminal bool + fd int + lineCount int +} + +// New returns a new Writer with defaults +func New(out io.Writer) *Writer { + w := &Writer{out: out} + if f, ok := out.(*os.File); ok { + fd := f.Fd() + w.isTerminal = isatty.IsTerminal(fd) + w.fd = int(fd) + } + return w +} + +// Flush flushes the underlying buffer +func (w *Writer) Flush(lineCount int) error { + err := w.clearLines() + w.lineCount = lineCount + // WriteTo takes care of w.buf.Reset + if _, e := w.buf.WriteTo(w.out); err == nil { + err = e + } + return err +} + +// Write appends the contents of p to the underlying buffer +func (w *Writer) Write(p []byte) (n int, err error) { + return w.buf.Write(p) +} + +// WriteString writes string to the underlying buffer +func (w *Writer) WriteString(s string) (n int, err error) { + return w.buf.WriteString(s) +} + +// ReadFrom reads from the provided io.Reader and writes to the underlying buffer. +func (w *Writer) ReadFrom(r io.Reader) (n int64, err error) { + return w.buf.ReadFrom(r) +} + +func (w *Writer) GetWidth() (int, error) { + if w.isTerminal { + tw, _, err := terminal.GetSize(w.fd) + return tw, err + } + return -1, NotATTY +} diff --git a/vendor/github.com/vbauerster/mpb/cwriter/writer_posix.go b/vendor/github.com/vbauerster/mpb/cwriter/writer_posix.go new file mode 100644 index 0000000..05e31c4 --- /dev/null +++ b/vendor/github.com/vbauerster/mpb/cwriter/writer_posix.go @@ -0,0 +1,13 @@ +// +build !windows + +package cwriter + +import ( + "io" + "strings" +) + +func (w *Writer) clearLines() error { + _, err := io.WriteString(w.out, strings.Repeat(clearCursorAndLine, w.lineCount)) + return err +} diff --git a/vendor/github.com/vbauerster/mpb/cwriter/writer_windows.go b/vendor/github.com/vbauerster/mpb/cwriter/writer_windows.go new file mode 100644 index 0000000..dad7f50 --- /dev/null +++ b/vendor/github.com/vbauerster/mpb/cwriter/writer_windows.go @@ -0,0 +1,77 @@ +// +build windows + +package cwriter + +import ( + "io" + "strings" + "syscall" + "unsafe" + + "github.com/mattn/go-isatty" +) + +var kernel32 = syscall.NewLazyDLL("kernel32.dll") + +var ( + procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo") + procSetConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition") + procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW") + procFillConsoleOutputAttribute = kernel32.NewProc("FillConsoleOutputAttribute") +) + +type ( + short int16 + word uint16 + dword uint32 + + coord struct { + x short + y short + } + smallRect struct { + left short + top short + right short + bottom short + } + consoleScreenBufferInfo struct { + size coord + cursorPosition coord + attributes word + window smallRect + maximumWindowSize coord + } +) + +// FdWriter is a writer with a file descriptor. 
+type FdWriter interface {
+	io.Writer
+	Fd() uintptr
+}
+
+func (w *Writer) clearLines() error {
+	f, ok := w.out.(FdWriter)
+	if ok && !isatty.IsTerminal(f.Fd()) {
+		_, err := io.WriteString(w.out, strings.Repeat(clearCursorAndLine, w.lineCount))
+		return err
+	}
+	fd := f.Fd()
+	var info consoleScreenBufferInfo
+	procGetConsoleScreenBufferInfo.Call(fd, uintptr(unsafe.Pointer(&info)))
+
+	for i := 0; i < w.lineCount; i++ {
+		// move the cursor up
+		info.cursorPosition.y--
+		procSetConsoleCursorPosition.Call(fd, uintptr(*(*int32)(unsafe.Pointer(&info.cursorPosition))))
+		// clear the line
+		cursor := coord{
+			x: info.window.left,
+			y: info.window.top + info.cursorPosition.y,
+		}
+		var count, w dword
+		count = dword(info.size.x)
+		procFillConsoleOutputCharacter.Call(fd, uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&w)))
+	}
+	return nil
+}
diff --git a/vendor/github.com/vbauerster/mpb/decor/counters.go b/vendor/github.com/vbauerster/mpb/decor/counters.go
new file mode 100644
index 0000000..e4161dc
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/decor/counters.go
@@ -0,0 +1,206 @@
+package decor
+
+import (
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+)
+
+const (
+	_   = iota
+	KiB = 1 << (iota * 10)
+	MiB
+	GiB
+	TiB
+)
+
+const (
+	KB = 1000
+	MB = KB * 1000
+	GB = MB * 1000
+	TB = GB * 1000
+)
+
+const (
+	_ = iota
+	UnitKiB
+	UnitKB
+)
+
+type CounterKiB int64
+
+func (c CounterKiB) Format(st fmt.State, verb rune) {
+	prec, ok := st.Precision()
+
+	if verb == 'd' || !ok {
+		prec = 0
+	}
+	if verb == 'f' && !ok {
+		prec = 6
+	}
+	// retain old behavior if the 's' verb is used
+	if verb == 's' {
+		prec = 1
+	}
+
+	var res, unit string
+	switch {
+	case c >= TiB:
+		unit = "TiB"
+		res = strconv.FormatFloat(float64(c)/TiB, 'f', prec, 64)
+	case c >= GiB:
+		unit = "GiB"
+		res = strconv.FormatFloat(float64(c)/GiB, 'f', prec, 64)
+	case c >= MiB:
+		unit = "MiB"
+		res = strconv.FormatFloat(float64(c)/MiB, 'f', prec, 64)
+	case c >= KiB:
+		unit = "KiB"
+		res = strconv.FormatFloat(float64(c)/KiB, 'f', prec, 64)
+	default:
+		unit = "b"
+		res = strconv.FormatInt(int64(c), 10)
+	}
+
+	if st.Flag(' ') {
+		res += " "
+	}
+	res += unit
+
+	if w, ok := st.Width(); ok {
+		if len(res) < w {
+			pad := strings.Repeat(" ", w-len(res))
+			if st.Flag(int('-')) {
+				res += pad
+			} else {
+				res = pad + res
+			}
+		}
+	}
+
+	io.WriteString(st, res)
+}
+
+type CounterKB int64
+
+func (c CounterKB) Format(st fmt.State, verb rune) {
+	prec, ok := st.Precision()
+
+	if verb == 'd' || !ok {
+		prec = 0
+	}
+	if verb == 'f' && !ok {
+		prec = 6
+	}
+	// retain old behavior if the 's' verb is used
+	if verb == 's' {
+		prec = 1
+	}
+
+	var res, unit string
+	switch {
+	case c >= TB:
+		unit = "TB"
+		res = strconv.FormatFloat(float64(c)/TB, 'f', prec, 64)
+	case c >= GB:
+		unit = "GB"
+		res = strconv.FormatFloat(float64(c)/GB, 'f', prec, 64)
+	case c >= MB:
+		unit = "MB"
+		res = strconv.FormatFloat(float64(c)/MB, 'f', prec, 64)
+	case c >= KB:
+		unit = "kB"
+		res = strconv.FormatFloat(float64(c)/KB, 'f', prec, 64)
+	default:
+		unit = "b"
+		res = strconv.FormatInt(int64(c), 10)
+	}
+
+	if st.Flag(' ') {
+		res += " "
+	}
+	res += unit
+
+	if w, ok := st.Width(); ok {
+		if len(res) < w {
+			pad := strings.Repeat(" ", w-len(res))
+			if st.Flag(int('-')) {
+				res += pad
+			} else {
+				res = pad + res
+			}
+		}
+	}
+
+	io.WriteString(st, res)
+}
+
+// CountersNoUnit is a wrapper around Counters with no unit param.
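+// An illustrative sketch: decor.CountersNoUnit("%d / %d") would render
+// something like "3 / 10".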
+func CountersNoUnit(pairFormat string, wcc ...WC) Decorator {
+	return Counters(0, pairFormat, wcc...)
+}
+
+// CountersKibiByte is a wrapper around Counters with predefined unit UnitKiB (bytes/1024).
+func CountersKibiByte(pairFormat string, wcc ...WC) Decorator {
+	return Counters(UnitKiB, pairFormat, wcc...)
+}
+
+// CountersKiloByte is a wrapper around Counters with predefined unit UnitKB (bytes/1000).
+func CountersKiloByte(pairFormat string, wcc ...WC) Decorator {
+	return Counters(UnitKB, pairFormat, wcc...)
+}
+
+// Counters decorator with dynamic unit measure adjustment.
+//
+// `unit` one of [0|UnitKiB|UnitKB] zero for no unit
+//
+// `pairFormat` printf compatible verbs for current and total, like "%f" or "%d"
+//
+// `wcc` optional WC config
+//
+// pairFormat example, if UnitKB is chosen:
+//
+// "%.1f / %.1f" = "1.0MB / 12.0MB" or "% .1f / % .1f" = "1.0 MB / 12.0 MB"
+func Counters(unit int, pairFormat string, wcc ...WC) Decorator {
+	var wc WC
+	for _, widthConf := range wcc {
+		wc = widthConf
+	}
+	wc.Init()
+	d := &countersDecorator{
+		WC:         wc,
+		unit:       unit,
+		pairFormat: pairFormat,
+	}
+	return d
+}
+
+type countersDecorator struct {
+	WC
+	unit        int
+	pairFormat  string
+	completeMsg *string
+}
+
+func (d *countersDecorator) Decor(st *Statistics) string {
+	if st.Completed && d.completeMsg != nil {
+		return d.FormatMsg(*d.completeMsg)
+	}
+
+	var str string
+	switch d.unit {
+	case UnitKiB:
+		str = fmt.Sprintf(d.pairFormat, CounterKiB(st.Current), CounterKiB(st.Total))
+	case UnitKB:
+		str = fmt.Sprintf(d.pairFormat, CounterKB(st.Current), CounterKB(st.Total))
+	default:
+		str = fmt.Sprintf(d.pairFormat, st.Current, st.Total)
+	}
+
+	return d.FormatMsg(str)
+}
+
+func (d *countersDecorator) OnCompleteMessage(msg string) {
+	d.completeMsg = &msg
+}
diff --git a/vendor/github.com/vbauerster/mpb/decor/decorator.go b/vendor/github.com/vbauerster/mpb/decor/decorator.go
new file mode 100644
index 0000000..6aaf6c8
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/decor/decorator.go
@@ -0,0 +1,144 @@
+package decor
+
+import (
+	"fmt"
+	"time"
+	"unicode/utf8"
+)
+
+const (
+	// DidentRight bit specifies indentation direction.
+	// |foo |b     | With DidentRight
+	// |  foo|    b| Without DidentRight
+	DidentRight = 1 << iota
+
+	// DextraSpace bit adds extra space, makes sense with DSyncWidth only.
+	// When DidentRight bit set, the space will be added to the right,
+	// otherwise to the left.
+	DextraSpace
+
+	// DSyncWidth bit enables same column width synchronization.
+	// Effective with multiple bars only.
+	DSyncWidth
+
+	// DSyncWidthR is shortcut for DSyncWidth|DidentRight
+	DSyncWidthR = DSyncWidth | DidentRight
+
+	// DSyncSpace is shortcut for DSyncWidth|DextraSpace
+	DSyncSpace = DSyncWidth | DextraSpace
+
+	// DSyncSpaceR is shortcut for DSyncWidth|DextraSpace|DidentRight
+	DSyncSpaceR = DSyncWidth | DextraSpace | DidentRight
+)
+
+const (
+	ET_STYLE_GO = iota
+	ET_STYLE_HHMMSS
+	ET_STYLE_HHMM
+	ET_STYLE_MMSS
+)
+
+// Statistics is a struct, which gets passed to a Decorator.
+type Statistics struct {
+	ID        int
+	Completed bool
+	Total     int64
+	Current   int64
+}
+
+// Decorator interface.
+// A decorator must implement this interface, in order to be used with the mpb library.
+type Decorator interface {
+	Decor(*Statistics) string
+	Syncable
+}
+
+// Syncable interface.
+// All decorators implement this interface implicitly.
+// Its Syncable method exposes the width sync channel, if sync is enabled.
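+// For instance, the WC type below reports true from its Syncable method
+// only when the DSyncWidth bit is set in its C field.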
+type Syncable interface {
+	Syncable() (bool, chan int)
+}
+
+// OnCompleteMessenger interface.
+// Decorators implementing this interface are supposed to return the
+// provided string on complete event.
+type OnCompleteMessenger interface {
+	OnCompleteMessage(string)
+}
+
+// AmountReceiver interface.
+// If a decorator needs to receive the increment amount,
+// this is the right interface to implement.
+type AmountReceiver interface {
+	NextAmount(int, ...time.Duration)
+}
+
+// ShutdownListener interface.
+// If a decorator needs to be notified once upon bar shutdown event,
+// this is the right interface to implement.
+type ShutdownListener interface {
+	Shutdown()
+}
+
+// Global convenience shortcuts
+var (
+	WCSyncWidth  = WC{C: DSyncWidth}
+	WCSyncWidthR = WC{C: DSyncWidthR}
+	WCSyncSpace  = WC{C: DSyncSpace}
+	WCSyncSpaceR = WC{C: DSyncSpaceR}
+)
+
+// WC is a struct with two public fields W and C, both of int type.
+// W represents width and C represents bit set of width related config.
+type WC struct {
+	W      int
+	C      int
+	format string
+	wsync  chan int
+}
+
+// FormatMsg formats final message according to WC.W and WC.C.
+// Should be called by any Decorator implementation.
+func (wc WC) FormatMsg(msg string) string {
+	if (wc.C & DSyncWidth) != 0 {
+		wc.wsync <- utf8.RuneCountInString(msg)
+		max := <-wc.wsync
+		if max == 0 {
+			max = wc.W
+		}
+		if (wc.C & DextraSpace) != 0 {
+			max++
+		}
+		return fmt.Sprintf(fmt.Sprintf(wc.format, max), msg)
+	}
+	return fmt.Sprintf(fmt.Sprintf(wc.format, wc.W), msg)
+}
+
+// Init initializes width related config.
+func (wc *WC) Init() {
+	wc.format = "%%"
+	if (wc.C & DidentRight) != 0 {
+		wc.format += "-"
+	}
+	wc.format += "%ds"
+	if (wc.C & DSyncWidth) != 0 {
+		wc.wsync = make(chan int)
+	}
+}
+
+func (wc *WC) Syncable() (bool, chan int) {
+	return (wc.C & DSyncWidth) != 0, wc.wsync
+}
+
+// OnComplete returns a decorator, which wraps the provided decorator, with
+// the sole purpose of displaying the provided message on complete event.
+//
+// `decorator` Decorator to wrap
+//
+// `message` message to display on complete event
+func OnComplete(decorator Decorator, message string) Decorator {
+	if d, ok := decorator.(OnCompleteMessenger); ok {
+		d.OnCompleteMessage(message)
+	}
+	return decorator
+}
diff --git a/vendor/github.com/vbauerster/mpb/decor/doc.go b/vendor/github.com/vbauerster/mpb/decor/doc.go
new file mode 100644
index 0000000..561a867
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/decor/doc.go
@@ -0,0 +1,25 @@
+// Copyright (C) 2016-2018 Vladimir Bauer
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+ Package decor contains common decorators used by "github.com/vbauerster/mpb" package.
+
+ Some decorators returned by this package might have a closure state. It is ok to use
+ decorators concurrently, unless you share the same decorator among multiple
+ *mpb.Bar instances. To avoid data races, create new decorator per *mpb.Bar instance.
+ + Don't: + + p := mpb.New() + name := decor.Name("bar") + p.AddBar(100, mpb.AppendDecorators(name)) + p.AddBar(100, mpb.AppendDecorators(name)) + + Do: + + p := mpb.New() + p.AddBar(100, mpb.AppendDecorators(decor.Name("bar1"))) + p.AddBar(100, mpb.AppendDecorators(decor.Name("bar2"))) +*/ +package decor diff --git a/vendor/github.com/vbauerster/mpb/decor/elapsed.go b/vendor/github.com/vbauerster/mpb/decor/elapsed.go new file mode 100644 index 0000000..649d40a --- /dev/null +++ b/vendor/github.com/vbauerster/mpb/decor/elapsed.go @@ -0,0 +1,68 @@ +package decor + +import ( + "fmt" + "time" +) + +// Elapsed returns elapsed time decorator. +// +// `style` one of [ET_STYLE_GO|ET_STYLE_HHMMSS|ET_STYLE_HHMM|ET_STYLE_MMSS] +// +// `wcc` optional WC config +func Elapsed(style int, wcc ...WC) Decorator { + var wc WC + for _, widthConf := range wcc { + wc = widthConf + } + wc.Init() + d := &elapsedDecorator{ + WC: wc, + style: style, + startTime: time.Now(), + } + return d +} + +type elapsedDecorator struct { + WC + style int + startTime time.Time + msg string + completeMsg *string +} + +func (d *elapsedDecorator) Decor(st *Statistics) string { + if st.Completed { + if d.completeMsg != nil { + return d.FormatMsg(*d.completeMsg) + } + return d.FormatMsg(d.msg) + } + + timeElapsed := time.Since(d.startTime) + hours := int64((timeElapsed / time.Hour) % 60) + minutes := int64((timeElapsed / time.Minute) % 60) + seconds := int64((timeElapsed / time.Second) % 60) + + switch d.style { + case ET_STYLE_GO: + d.msg = fmt.Sprint(time.Duration(timeElapsed.Seconds()) * time.Second) + case ET_STYLE_HHMMSS: + d.msg = fmt.Sprintf("%02d:%02d:%02d", hours, minutes, seconds) + case ET_STYLE_HHMM: + d.msg = fmt.Sprintf("%02d:%02d", hours, minutes) + case ET_STYLE_MMSS: + if hours > 0 { + d.msg = fmt.Sprintf("%02d:%02d:%02d", hours, minutes, seconds) + } else { + d.msg = fmt.Sprintf("%02d:%02d", minutes, seconds) + } + } + + return d.FormatMsg(d.msg) +} + +func (d *elapsedDecorator) OnCompleteMessage(msg string) { + d.completeMsg = &msg +} diff --git a/vendor/github.com/vbauerster/mpb/decor/eta.go b/vendor/github.com/vbauerster/mpb/decor/eta.go new file mode 100644 index 0000000..44a1f03 --- /dev/null +++ b/vendor/github.com/vbauerster/mpb/decor/eta.go @@ -0,0 +1,207 @@ +package decor + +import ( + "fmt" + "math" + "time" + + "github.com/VividCortex/ewma" + "github.com/vbauerster/mpb/internal" +) + +type TimeNormalizer func(time.Duration) time.Duration + +// EwmaETA exponential-weighted-moving-average based ETA decorator. +// +// `style` one of [ET_STYLE_GO|ET_STYLE_HHMMSS|ET_STYLE_HHMM|ET_STYLE_MMSS] +// +// `age` is the previous N samples to average over. +// +// `wcc` optional WC config +func EwmaETA(style int, age float64, wcc ...WC) Decorator { + return MovingAverageETA(style, ewma.NewMovingAverage(age), NopNormalizer(), wcc...) +} + +// MovingAverageETA decorator relies on MovingAverage implementation to calculate its average. 
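+// An illustrative combination (assumed values), smoothing over roughly the
+// last 30 samples with no normalization:
+//
+//	decor.MovingAverageETA(decor.ET_STYLE_GO, ewma.NewMovingAverage(30), decor.NopNormalizer())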
+// +// `style` one of [ET_STYLE_GO|ET_STYLE_HHMMSS|ET_STYLE_HHMM|ET_STYLE_MMSS] +// +// `average` available implementations of MovingAverage [ewma.MovingAverage|NewMedian|NewMedianEwma] +// +// `normalizer` available implementations are [NopNormalizer|FixedIntervalTimeNormalizer|MaxTolerateTimeNormalizer] +// +// `wcc` optional WC config +func MovingAverageETA(style int, average MovingAverage, normalizer TimeNormalizer, wcc ...WC) Decorator { + var wc WC + for _, widthConf := range wcc { + wc = widthConf + } + wc.Init() + d := &movingAverageETA{ + WC: wc, + style: style, + average: average, + normalizer: normalizer, + } + return d +} + +type movingAverageETA struct { + WC + style int + average ewma.MovingAverage + completeMsg *string + normalizer TimeNormalizer +} + +func (d *movingAverageETA) Decor(st *Statistics) string { + if st.Completed && d.completeMsg != nil { + return d.FormatMsg(*d.completeMsg) + } + + v := internal.Round(d.average.Value()) + remaining := d.normalizer(time.Duration((st.Total - st.Current) * int64(v))) + hours := int64((remaining / time.Hour) % 60) + minutes := int64((remaining / time.Minute) % 60) + seconds := int64((remaining / time.Second) % 60) + + var str string + switch d.style { + case ET_STYLE_GO: + str = fmt.Sprint(time.Duration(remaining.Seconds()) * time.Second) + case ET_STYLE_HHMMSS: + str = fmt.Sprintf("%02d:%02d:%02d", hours, minutes, seconds) + case ET_STYLE_HHMM: + str = fmt.Sprintf("%02d:%02d", hours, minutes) + case ET_STYLE_MMSS: + if hours > 0 { + str = fmt.Sprintf("%02d:%02d:%02d", hours, minutes, seconds) + } else { + str = fmt.Sprintf("%02d:%02d", minutes, seconds) + } + } + + return d.FormatMsg(str) +} + +func (d *movingAverageETA) NextAmount(n int, wdd ...time.Duration) { + var workDuration time.Duration + for _, wd := range wdd { + workDuration = wd + } + lastItemEstimate := float64(workDuration) / float64(n) + if math.IsInf(lastItemEstimate, 0) || math.IsNaN(lastItemEstimate) { + return + } + d.average.Add(lastItemEstimate) +} + +func (d *movingAverageETA) OnCompleteMessage(msg string) { + d.completeMsg = &msg +} + +// AverageETA decorator. 
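+// For example (illustrative), an ETA column reserving 4 cells of width:
+//
+//	decor.AverageETA(decor.ET_STYLE_GO, decor.WC{W: 4})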
+// +// `style` one of [ET_STYLE_GO|ET_STYLE_HHMMSS|ET_STYLE_HHMM|ET_STYLE_MMSS] +// +// `wcc` optional WC config +func AverageETA(style int, wcc ...WC) Decorator { + var wc WC + for _, widthConf := range wcc { + wc = widthConf + } + wc.Init() + d := &averageETA{ + WC: wc, + style: style, + startTime: time.Now(), + } + return d +} + +type averageETA struct { + WC + style int + startTime time.Time + completeMsg *string +} + +func (d *averageETA) Decor(st *Statistics) string { + if st.Completed && d.completeMsg != nil { + return d.FormatMsg(*d.completeMsg) + } + + var str string + timeElapsed := time.Since(d.startTime) + v := internal.Round(float64(timeElapsed) / float64(st.Current)) + if math.IsInf(v, 0) || math.IsNaN(v) { + v = 0 + } + remaining := time.Duration((st.Total - st.Current) * int64(v)) + hours := int64((remaining / time.Hour) % 60) + minutes := int64((remaining / time.Minute) % 60) + seconds := int64((remaining / time.Second) % 60) + + switch d.style { + case ET_STYLE_GO: + str = fmt.Sprint(time.Duration(remaining.Seconds()) * time.Second) + case ET_STYLE_HHMMSS: + str = fmt.Sprintf("%02d:%02d:%02d", hours, minutes, seconds) + case ET_STYLE_HHMM: + str = fmt.Sprintf("%02d:%02d", hours, minutes) + case ET_STYLE_MMSS: + if hours > 0 { + str = fmt.Sprintf("%02d:%02d:%02d", hours, minutes, seconds) + } else { + str = fmt.Sprintf("%02d:%02d", minutes, seconds) + } + } + + return d.FormatMsg(str) +} + +func (d *averageETA) OnCompleteMessage(msg string) { + d.completeMsg = &msg +} + +func MaxTolerateTimeNormalizer(maxTolerate time.Duration) TimeNormalizer { + var normalized time.Duration + var lastCall time.Time + return func(remaining time.Duration) time.Duration { + if diff := normalized - remaining; diff <= 0 || diff > maxTolerate || remaining < maxTolerate/2 { + normalized = remaining + lastCall = time.Now() + return remaining + } + normalized -= time.Since(lastCall) + lastCall = time.Now() + return normalized + } +} + +func FixedIntervalTimeNormalizer(updInterval int) TimeNormalizer { + var normalized time.Duration + var lastCall time.Time + var count int + return func(remaining time.Duration) time.Duration { + if count == 0 || remaining <= time.Duration(15*time.Second) { + count = updInterval + normalized = remaining + lastCall = time.Now() + return remaining + } + count-- + normalized -= time.Since(lastCall) + lastCall = time.Now() + if normalized > 0 { + return normalized + } + return remaining + } +} + +func NopNormalizer() TimeNormalizer { + return func(remaining time.Duration) time.Duration { + return remaining + } +} diff --git a/vendor/github.com/vbauerster/mpb/decor/moving-average.go b/vendor/github.com/vbauerster/mpb/decor/moving-average.go new file mode 100644 index 0000000..f9596a2 --- /dev/null +++ b/vendor/github.com/vbauerster/mpb/decor/moving-average.go @@ -0,0 +1,66 @@ +package decor + +import ( + "sort" + + "github.com/VividCortex/ewma" +) + +// MovingAverage is the interface that computes a moving average over a time- +// series stream of numbers. The average may be over a window or exponentially +// decaying. 
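+//
+// For example, NewMedian below satisfies this interface with a fixed
+// three-sample median window.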
+type MovingAverage interface { + Add(float64) + Value() float64 + Set(float64) +} + +type medianWindow [3]float64 + +func (s *medianWindow) Len() int { return len(s) } +func (s *medianWindow) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s *medianWindow) Less(i, j int) bool { return s[i] < s[j] } + +func (s *medianWindow) Add(value float64) { + s[0], s[1] = s[1], s[2] + s[2] = value +} + +func (s *medianWindow) Value() float64 { + tmp := *s + sort.Sort(&tmp) + return tmp[1] +} + +func (s *medianWindow) Set(value float64) { + for i := 0; i < len(s); i++ { + s[i] = value + } +} + +// NewMedian is fixed last 3 samples median MovingAverage. +func NewMedian() MovingAverage { + return new(medianWindow) +} + +type medianEwma struct { + count uint + median MovingAverage + MovingAverage +} + +func (s *medianEwma) Add(v float64) { + s.median.Add(v) + if s.count >= 2 { + s.MovingAverage.Add(s.median.Value()) + } + s.count++ +} + +// NewMedianEwma is ewma based MovingAverage, which gets its values from median MovingAverage. +func NewMedianEwma(age ...float64) MovingAverage { + return &medianEwma{ + MovingAverage: ewma.NewMovingAverage(age...), + median: NewMedian(), + } +} diff --git a/vendor/github.com/vbauerster/mpb/decor/name.go b/vendor/github.com/vbauerster/mpb/decor/name.go new file mode 100644 index 0000000..a5a5d14 --- /dev/null +++ b/vendor/github.com/vbauerster/mpb/decor/name.go @@ -0,0 +1,45 @@ +package decor + +// StaticName returns name decorator. +// +// `name` string to display +// +// `wcc` optional WC config +func StaticName(name string, wcc ...WC) Decorator { + return Name(name, wcc...) +} + +// Name returns name decorator. +// +// `name` string to display +// +// `wcc` optional WC config +func Name(name string, wcc ...WC) Decorator { + var wc WC + for _, widthConf := range wcc { + wc = widthConf + } + wc.Init() + d := &nameDecorator{ + WC: wc, + msg: name, + } + return d +} + +type nameDecorator struct { + WC + msg string + complete *string +} + +func (d *nameDecorator) Decor(st *Statistics) string { + if st.Completed && d.complete != nil { + return d.FormatMsg(*d.complete) + } + return d.FormatMsg(d.msg) +} + +func (d *nameDecorator) OnCompleteMessage(msg string) { + d.complete = &msg +} diff --git a/vendor/github.com/vbauerster/mpb/decor/percentage.go b/vendor/github.com/vbauerster/mpb/decor/percentage.go new file mode 100644 index 0000000..078fbcf --- /dev/null +++ b/vendor/github.com/vbauerster/mpb/decor/percentage.go @@ -0,0 +1,39 @@ +package decor + +import ( + "fmt" + + "github.com/vbauerster/mpb/internal" +) + +// Percentage returns percentage decorator. 
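+// A minimal sketch: mpb.AppendDecorators(decor.Percentage()) renders values
+// such as "42 %".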
+
+// `wcc` optional WC config
+func Percentage(wcc ...WC) Decorator {
+	var wc WC
+	for _, widthConf := range wcc {
+		wc = widthConf
+	}
+	wc.Init()
+	d := &percentageDecorator{
+		WC: wc,
+	}
+	return d
+}
+
+type percentageDecorator struct {
+	WC
+	completeMsg *string
+}
+
+func (d *percentageDecorator) Decor(st *Statistics) string {
+	if st.Completed && d.completeMsg != nil {
+		return d.FormatMsg(*d.completeMsg)
+	}
+	str := fmt.Sprintf("%d %%", internal.Percentage(st.Total, st.Current, 100))
+	return d.FormatMsg(str)
+}
+
+func (d *percentageDecorator) OnCompleteMessage(msg string) {
+	d.completeMsg = &msg
+}
diff --git a/vendor/github.com/vbauerster/mpb/decor/speed.go b/vendor/github.com/vbauerster/mpb/decor/speed.go
new file mode 100644
index 0000000..395e5d0
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/decor/speed.go
@@ -0,0 +1,270 @@
+package decor
+
+import (
+	"fmt"
+	"io"
+	"math"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/VividCortex/ewma"
+)
+
+type SpeedKiB float64
+
+func (s SpeedKiB) Format(st fmt.State, verb rune) {
+	prec, ok := st.Precision()
+
+	if verb == 'd' || !ok {
+		prec = 0
+	}
+	if verb == 'f' && !ok {
+		prec = 6
+	}
+	// retain old behavior if the 's' verb is used
+	if verb == 's' {
+		prec = 1
+	}
+
+	var res, unit string
+	switch {
+	case s >= TiB:
+		unit = "TiB/s"
+		res = strconv.FormatFloat(float64(s)/TiB, 'f', prec, 64)
+	case s >= GiB:
+		unit = "GiB/s"
+		res = strconv.FormatFloat(float64(s)/GiB, 'f', prec, 64)
+	case s >= MiB:
+		unit = "MiB/s"
+		res = strconv.FormatFloat(float64(s)/MiB, 'f', prec, 64)
+	case s >= KiB:
+		unit = "KiB/s"
+		res = strconv.FormatFloat(float64(s)/KiB, 'f', prec, 64)
+	default:
+		unit = "b/s"
+		res = strconv.FormatInt(int64(s), 10)
+	}
+
+	if st.Flag(' ') {
+		res += " "
+	}
+	res += unit
+
+	if w, ok := st.Width(); ok {
+		if len(res) < w {
+			pad := strings.Repeat(" ", w-len(res))
+			if st.Flag(int('-')) {
+				res += pad
+			} else {
+				res = pad + res
+			}
+		}
+	}
+
+	io.WriteString(st, res)
+}
+
+type SpeedKB float64
+
+func (s SpeedKB) Format(st fmt.State, verb rune) {
+	prec, ok := st.Precision()
+
+	if verb == 'd' || !ok {
+		prec = 0
+	}
+	if verb == 'f' && !ok {
+		prec = 6
+	}
+	// retain old behavior if the 's' verb is used
+	if verb == 's' {
+		prec = 1
+	}
+
+	var res, unit string
+	switch {
+	case s >= TB:
+		unit = "TB/s"
+		res = strconv.FormatFloat(float64(s)/TB, 'f', prec, 64)
+	case s >= GB:
+		unit = "GB/s"
+		res = strconv.FormatFloat(float64(s)/GB, 'f', prec, 64)
+	case s >= MB:
+		unit = "MB/s"
+		res = strconv.FormatFloat(float64(s)/MB, 'f', prec, 64)
+	case s >= KB:
+		unit = "kB/s"
+		res = strconv.FormatFloat(float64(s)/KB, 'f', prec, 64)
+	default:
+		unit = "b/s"
+		res = strconv.FormatInt(int64(s), 10)
+	}
+
+	if st.Flag(' ') {
+		res += " "
+	}
+	res += unit
+
+	if w, ok := st.Width(); ok {
+		if len(res) < w {
+			pad := strings.Repeat(" ", w-len(res))
+			if st.Flag(int('-')) {
+				res += pad
+			} else {
+				res = pad + res
+			}
+		}
+	}
+
+	io.WriteString(st, res)
+}
+
+// EwmaSpeed exponential-weighted-moving-average based speed decorator,
+// with dynamic unit measure adjustment.
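+//
+// An illustrative sketch, averaging over the last 60 samples:
+//
+//	decor.EwmaSpeed(decor.UnitKiB, "% .1f", 60)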
+
+// `unit` one of [0|UnitKiB|UnitKB] zero for no unit
+//
+// `unitFormat` printf compatible verb for value, like "%f" or "%d"
+//
+// `age` is the previous N samples to average over
+//
+// `wcc` optional WC config
+//
+// unitFormat example, if UnitKiB is chosen:
+//
+// "%.1f" = "1.0MiB/s" or "% .1f" = "1.0 MiB/s"
+func EwmaSpeed(unit int, unitFormat string, age float64, wcc ...WC) Decorator {
+	return MovingAverageSpeed(unit, unitFormat, ewma.NewMovingAverage(age), wcc...)
+}
+
+// MovingAverageSpeed decorator relies on MovingAverage implementation to calculate its average.
+//
+// `unit` one of [0|UnitKiB|UnitKB] zero for no unit
+//
+// `unitFormat` printf compatible verb for value, like "%f" or "%d"
+//
+// `average` MovingAverage implementation
+//
+// `wcc` optional WC config
+func MovingAverageSpeed(unit int, unitFormat string, average MovingAverage, wcc ...WC) Decorator {
+	var wc WC
+	for _, widthConf := range wcc {
+		wc = widthConf
+	}
+	wc.Init()
+	d := &movingAverageSpeed{
+		WC:         wc,
+		unit:       unit,
+		unitFormat: unitFormat,
+		average:    average,
+	}
+	return d
+}
+
+type movingAverageSpeed struct {
+	WC
+	unit        int
+	unitFormat  string
+	average     ewma.MovingAverage
+	msg         string
+	completeMsg *string
+}
+
+func (d *movingAverageSpeed) Decor(st *Statistics) string {
+	if st.Completed {
+		if d.completeMsg != nil {
+			return d.FormatMsg(*d.completeMsg)
+		}
+		return d.FormatMsg(d.msg)
+	}
+
+	speed := d.average.Value()
+	switch d.unit {
+	case UnitKiB:
+		d.msg = fmt.Sprintf(d.unitFormat, SpeedKiB(speed))
+	case UnitKB:
+		d.msg = fmt.Sprintf(d.unitFormat, SpeedKB(speed))
+	default:
+		d.msg = fmt.Sprintf(d.unitFormat, speed)
+	}
+
+	return d.FormatMsg(d.msg)
+}
+
+func (s *movingAverageSpeed) NextAmount(n int, wdd ...time.Duration) {
+	var workDuration time.Duration
+	for _, wd := range wdd {
+		workDuration = wd
+	}
+	speed := float64(n) / workDuration.Seconds() / 1000
+	if math.IsInf(speed, 0) || math.IsNaN(speed) {
+		return
+	}
+	s.average.Add(speed)
+}
+
+func (d *movingAverageSpeed) OnCompleteMessage(msg string) {
+	d.completeMsg = &msg
+}
+
+// AverageSpeed decorator with dynamic unit measure adjustment.
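+//
+// For example (illustrative): decor.AverageSpeed(decor.UnitKB, "%.2f")
+// derives speed from overall elapsed time rather than a moving window.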
+
+// `unit` one of [0|UnitKiB|UnitKB] zero for no unit
+//
+// `unitFormat` printf compatible verb for value, like "%f" or "%d"
+//
+// `wcc` optional WC config
+//
+// unitFormat example, if UnitKiB is chosen:
+//
+// "%.1f" = "1.0MiB/s" or "% .1f" = "1.0 MiB/s"
+func AverageSpeed(unit int, unitFormat string, wcc ...WC) Decorator {
+	var wc WC
+	for _, widthConf := range wcc {
+		wc = widthConf
+	}
+	wc.Init()
+	d := &averageSpeed{
+		WC:         wc,
+		unit:       unit,
+		unitFormat: unitFormat,
+		startTime:  time.Now(),
+	}
+	return d
+}
+
+type averageSpeed struct {
+	WC
+	unit        int
+	unitFormat  string
+	startTime   time.Time
+	msg         string
+	completeMsg *string
+}
+
+func (d *averageSpeed) Decor(st *Statistics) string {
+	if st.Completed {
+		if d.completeMsg != nil {
+			return d.FormatMsg(*d.completeMsg)
+		}
+		return d.FormatMsg(d.msg)
+	}
+
+	timeElapsed := time.Since(d.startTime)
+	speed := float64(st.Current) / timeElapsed.Seconds()
+
+	switch d.unit {
+	case UnitKiB:
+		d.msg = fmt.Sprintf(d.unitFormat, SpeedKiB(speed))
+	case UnitKB:
+		d.msg = fmt.Sprintf(d.unitFormat, SpeedKB(speed))
+	default:
+		d.msg = fmt.Sprintf(d.unitFormat, speed)
+	}
+
+	return d.FormatMsg(d.msg)
+}
+
+func (d *averageSpeed) OnCompleteMessage(msg string) {
+	d.completeMsg = &msg
+}
diff --git a/vendor/github.com/vbauerster/mpb/doc.go b/vendor/github.com/vbauerster/mpb/doc.go
new file mode 100644
index 0000000..1624595
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/doc.go
@@ -0,0 +1,6 @@
+// Copyright (C) 2016-2018 Vladimir Bauer
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package mpb is a library for rendering progress bars in terminal applications.
+package mpb
diff --git a/vendor/github.com/vbauerster/mpb/internal/percentage.go b/vendor/github.com/vbauerster/mpb/internal/percentage.go
new file mode 100644
index 0000000..3c8defb
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/internal/percentage.go
@@ -0,0 +1,10 @@
+package internal
+
+// Percentage is a helper function, to calculate percentage.
+func Percentage(total, current, width int64) int64 {
+	if total <= 0 {
+		return 0
+	}
+	p := float64(width*current) / float64(total)
+	return int64(Round(p))
+}
diff --git a/vendor/github.com/vbauerster/mpb/internal/round.go b/vendor/github.com/vbauerster/mpb/internal/round.go
new file mode 100644
index 0000000..c54a789
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/internal/round.go
@@ -0,0 +1,49 @@
+package internal
+
+import "math"
+
+const (
+	uvone    = 0x3FF0000000000000
+	mask     = 0x7FF
+	shift    = 64 - 11 - 1
+	bias     = 1023
+	signMask = 1 << 63
+	fracMask = 1<<shift - 1
+)
+
+// Round returns the nearest integer, rounding half away from zero.
+//
+// Special cases are:
+//	Round(±0) = ±0
+//	Round(±Inf) = ±Inf
+//	Round(NaN) = NaN
+func Round(x float64) float64 {
+	// Round is a faster implementation of:
+	//
+	// func Round(x float64) float64 {
+	//   t := Trunc(x)
+	//   if Abs(x-t) >= 0.5 {
+	//     return t + Copysign(1, x)
+	//   }
+	//   return t
+	// }
+	bits := math.Float64bits(x)
+	e := uint(bits>>shift) & mask
+	if e < bias {
+		// Round abs(x) < 1 including denormals.
+		bits &= signMask // +-0
+		if e == bias-1 {
+			bits |= uvone // +-1
+		}
+	} else if e < bias+shift {
+		// Round any abs(x) >= 1 containing a fractional component [0,1).
+		//
+		// Numbers with larger exponents are returned unchanged since they
+		// must be either an integer, infinity, or NaN.
+		const half = 1 << (shift - 1)
+		e -= bias
+		bits += half >> e
+		bits &^= fracMask >> e
+	}
+	return math.Float64frombits(bits)
+}
diff --git a/vendor/github.com/vbauerster/mpb/options.go b/vendor/github.com/vbauerster/mpb/options.go
new file mode 100644
index 0000000..05d2ecf
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/options.go
@@ -0,0 +1,95 @@
+package mpb
+
+import (
+	"io"
+	"sync"
+	"time"
+	"unicode/utf8"
+
+	"github.com/vbauerster/mpb/cwriter"
+)
+
+// ProgressOption is a function option which changes the default behavior of
+// progress pool, if passed to mpb.New(...ProgressOption)
+type ProgressOption func(*pState)
+
+// WithWaitGroup provides a means to have a single join point.
+// If a *sync.WaitGroup is provided, you can safely call just p.Wait()
+// without calling Wait() on the provided *sync.WaitGroup.
+// Makes sense when there is more than one bar to render.
+func WithWaitGroup(wg *sync.WaitGroup) ProgressOption {
+	return func(s *pState) {
+		s.uwg = wg
+	}
+}
+
+// WithWidth overrides the default width 80
+func WithWidth(w int) ProgressOption {
+	return func(s *pState) {
+		if w >= 0 {
+			s.width = w
+		}
+	}
+}
+
+// WithFormat overrides the default bar format "[=>-]"
+func WithFormat(format string) ProgressOption {
+	return func(s *pState) {
+		if utf8.RuneCountInString(format) == formatLen {
+			s.format = format
+		}
+	}
+}
+
+// WithRefreshRate overrides the default 120ms refresh rate
+func WithRefreshRate(d time.Duration) ProgressOption {
+	return func(s *pState) {
+		if d < 10*time.Millisecond {
+			return
+		}
+		s.rr = d
+	}
+}
+
+// WithManualRefresh disables the internal auto refresh time.Ticker.
+// Refresh will occur upon receiving a value from the provided ch.
+func WithManualRefresh(ch <-chan time.Time) ProgressOption {
+	return func(s *pState) {
+		s.manualRefreshCh = ch
+	}
+}
+
+// WithCancel provides your cancel channel,
+// which you plan to close at some point.
+func WithCancel(ch <-chan struct{}) ProgressOption {
+	return func(s *pState) {
+		s.cancel = ch
+	}
+}
+
+// WithShutdownNotifier: the provided channel will be closed after all bars
+// have been rendered.
+func WithShutdownNotifier(ch chan struct{}) ProgressOption {
+	return func(s *pState) {
+		s.shutdownNotifier = ch
+	}
+}
+
+// WithOutput overrides the default output os.Stdout
+func WithOutput(w io.Writer) ProgressOption {
+	return func(s *pState) {
+		if w == nil {
+			return
+		}
+		s.cw = cwriter.New(w)
+	}
+}
+
+// WithDebugOutput sets debug output.
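+//
+// A hedged usage sketch, directing internal render diagnostics to stderr:
+//
+//	p := mpb.New(mpb.WithDebugOutput(os.Stderr))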
+func WithDebugOutput(w io.Writer) ProgressOption {
+	return func(s *pState) {
+		if w == nil {
+			return
+		}
+		s.debugOut = w
+	}
+}
diff --git a/vendor/github.com/vbauerster/mpb/options_go1.7.go b/vendor/github.com/vbauerster/mpb/options_go1.7.go
new file mode 100644
index 0000000..ca9a5ba
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/options_go1.7.go
@@ -0,0 +1,15 @@
+//+build go1.7
+
+package mpb
+
+import "context"
+
+// WithContext: the provided context will be used for cancellation purposes
+func WithContext(ctx context.Context) ProgressOption {
+	return func(s *pState) {
+		if ctx == nil {
+			panic("ctx must not be nil")
+		}
+		s.cancel = ctx.Done()
+	}
+}
diff --git a/vendor/github.com/vbauerster/mpb/priority_queue.go b/vendor/github.com/vbauerster/mpb/priority_queue.go
new file mode 100644
index 0000000..7bc588c
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/priority_queue.go
@@ -0,0 +1,40 @@
+package mpb
+
+import "container/heap"
+
+// A priorityQueue implements heap.Interface
+type priorityQueue []*Bar
+
+func (pq priorityQueue) Len() int { return len(pq) }
+
+func (pq priorityQueue) Less(i, j int) bool {
+	return pq[i].priority < pq[j].priority
+}
+
+func (pq priorityQueue) Swap(i, j int) {
+	pq[i], pq[j] = pq[j], pq[i]
+	pq[i].index = i
+	pq[j].index = j
+}
+
+func (pq *priorityQueue) Push(x interface{}) {
+	n := len(*pq)
+	bar := x.(*Bar)
+	bar.index = n
+	*pq = append(*pq, bar)
+}
+
+func (pq *priorityQueue) Pop() interface{} {
+	old := *pq
+	n := len(old)
+	bar := old[n-1]
+	bar.index = -1 // for safety
+	*pq = old[0 : n-1]
+	return bar
+}
+
+// update modifies the priority of a Bar in the queue.
+func (pq *priorityQueue) update(bar *Bar, priority int) {
+	bar.priority = priority
+	heap.Fix(pq, bar.index)
+}
diff --git a/vendor/github.com/vbauerster/mpb/progress.go b/vendor/github.com/vbauerster/mpb/progress.go
new file mode 100644
index 0000000..d95fe45
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/progress.go
@@ -0,0 +1,251 @@
+package mpb
+
+import (
+	"container/heap"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"sync"
+	"time"
+
+	"github.com/vbauerster/mpb/cwriter"
+)
+
+const (
+	// default RefreshRate
+	prr = 120 * time.Millisecond
+	// default width
+	pwidth = 80
+	// default format
+	pformat = "[=>-]"
+)
+
+// Progress represents the container that renders Progress bars
+type Progress struct {
+	wg           *sync.WaitGroup
+	uwg          *sync.WaitGroup
+	operateState chan func(*pState)
+	done         chan struct{}
+}
+
+type pState struct {
+	bHeap           *priorityQueue
+	shutdownPending []*Bar
+	heapUpdated     bool
+	zeroWait        bool
+	idCounter       int
+	width           int
+	format          string
+	rr              time.Duration
+	cw              *cwriter.Writer
+	pMatrix         map[int][]chan int
+	aMatrix         map[int][]chan int
+
+	// following are provided by user
+	uwg              *sync.WaitGroup
+	manualRefreshCh  <-chan time.Time
+	cancel           <-chan struct{}
+	shutdownNotifier chan struct{}
+	waitBars         map[*Bar]*Bar
+	debugOut         io.Writer
+}
+
+// New creates a new Progress instance, which orchestrates the bars rendering process.
+// It accepts mpb.ProgressOption funcs for customization.
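+//
+// For example, mirroring the README above (illustrative values):
+//
+//	p := mpb.New(
+//		mpb.WithWidth(64),
+//		mpb.WithRefreshRate(180*time.Millisecond),
+//	)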
+func New(options ...ProgressOption) *Progress {
+	pq := make(priorityQueue, 0)
+	heap.Init(&pq)
+	s := &pState{
+		bHeap:    &pq,
+		width:    pwidth,
+		format:   pformat,
+		cw:       cwriter.New(os.Stdout),
+		rr:       prr,
+		waitBars: make(map[*Bar]*Bar),
+		debugOut: ioutil.Discard,
+	}
+
+	for _, opt := range options {
+		if opt != nil {
+			opt(s)
+		}
+	}
+
+	p := &Progress{
+		uwg:          s.uwg,
+		wg:           new(sync.WaitGroup),
+		operateState: make(chan func(*pState)),
+		done:         make(chan struct{}),
+	}
+	go p.serve(s)
+	return p
+}
+
+// AddBar creates a new progress bar and adds it to the container.
+func (p *Progress) AddBar(total int64, options ...BarOption) *Bar {
+	p.wg.Add(1)
+	result := make(chan *Bar)
+	select {
+	case p.operateState <- func(s *pState) {
+		options = append(options, barWidth(s.width), barFormat(s.format))
+		b := newBar(p.wg, s.idCounter, total, s.cancel, options...)
+		if b.runningBar != nil {
+			s.waitBars[b.runningBar] = b
+		} else {
+			heap.Push(s.bHeap, b)
+			s.heapUpdated = true
+		}
+		s.idCounter++
+		result <- b
+	}:
+		return <-result
+	case <-p.done:
+		p.wg.Done()
+		return nil
+	}
+}
+
+// Abort is only effective while the bar is running; it removes the bar
+// immediately, without waiting for its completion.
+// If the bar is already completed, there is nothing to abort.
+// If you need to remove a bar after completion, use the BarRemoveOnComplete BarOption.
+func (p *Progress) Abort(b *Bar, remove bool) {
+	select {
+	case p.operateState <- func(s *pState) {
+		if b.index < 0 {
+			return
+		}
+		if remove {
+			s.heapUpdated = heap.Remove(s.bHeap, b.index) != nil
+		}
+		s.shutdownPending = append(s.shutdownPending, b)
+	}:
+	case <-p.done:
+	}
+}
+
+// UpdateBarPriority provides a way to change bar's order position.
+// Zero is the highest priority, i.e. the bar will be on top.
+func (p *Progress) UpdateBarPriority(b *Bar, priority int) {
+	select {
+	case p.operateState <- func(s *pState) { s.bHeap.update(b, priority) }:
+	case <-p.done:
+	}
+}
+
+// BarCount returns bars count
+func (p *Progress) BarCount() int {
+	result := make(chan int, 1)
+	select {
+	case p.operateState <- func(s *pState) { result <- s.bHeap.Len() }:
+		return <-result
+	case <-p.done:
+		return 0
+	}
+}
+
+// Wait first waits for the user provided *sync.WaitGroup, if any,
+// then waits for all bars to complete, and finally shuts down the master goroutine.
+// After this method has been called, there is no way to reuse the *Progress instance.
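+//
+// Typical usage, as in the README examples above:
+//
+//	p := mpb.New(mpb.WithWaitGroup(&wg))
+//	// ... add bars, spawn workers ...
+//	p.Wait()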
+func (p *Progress) Wait() { + if p.uwg != nil { + p.uwg.Wait() + } + + p.wg.Wait() + + select { + case p.operateState <- func(s *pState) { s.zeroWait = true }: + <-p.done + case <-p.done: + } +} + +func (s *pState) updateSyncMatrix() { + s.pMatrix = make(map[int][]chan int) + s.aMatrix = make(map[int][]chan int) + for i := 0; i < s.bHeap.Len(); i++ { + bar := (*s.bHeap)[i] + table := bar.wSyncTable() + pRow, aRow := table[0], table[1] + + for i, ch := range pRow { + s.pMatrix[i] = append(s.pMatrix[i], ch) + } + + for i, ch := range aRow { + s.aMatrix[i] = append(s.aMatrix[i], ch) + } + } +} + +func (s *pState) render(tw int) { + if s.heapUpdated { + s.updateSyncMatrix() + s.heapUpdated = false + } + syncWidth(s.pMatrix) + syncWidth(s.aMatrix) + + for i := 0; i < s.bHeap.Len(); i++ { + bar := (*s.bHeap)[i] + go bar.render(s.debugOut, tw) + } + + if err := s.flush(s.bHeap.Len()); err != nil { + fmt.Fprintf(s.debugOut, "%s %s %v\n", "[mpb]", time.Now(), err) + } +} + +func (s *pState) flush(lineCount int) error { + for s.bHeap.Len() > 0 { + bar := heap.Pop(s.bHeap).(*Bar) + frameReader := <-bar.frameReaderCh + defer func() { + if frameReader.toShutdown { + // shutdown at next flush, in other words decrement underlying WaitGroup + // only after the bar with completed state has been flushed. + // this ensures no bar ends up with less than 100% rendered. + s.shutdownPending = append(s.shutdownPending, bar) + if replacementBar, ok := s.waitBars[bar]; ok { + heap.Push(s.bHeap, replacementBar) + s.heapUpdated = true + delete(s.waitBars, bar) + } + if frameReader.removeOnComplete { + s.heapUpdated = true + return + } + } + heap.Push(s.bHeap, bar) + }() + s.cw.ReadFrom(frameReader) + lineCount += frameReader.extendedLines + } + + for i := len(s.shutdownPending) - 1; i >= 0; i-- { + close(s.shutdownPending[i].shutdown) + s.shutdownPending = s.shutdownPending[:i] + } + + return s.cw.Flush(lineCount) +} + +func syncWidth(matrix map[int][]chan int) { + for _, column := range matrix { + column := column + go func() { + var maxWidth int + for _, ch := range column { + w := <-ch + if w > maxWidth { + maxWidth = w + } + } + for _, ch := range column { + ch <- maxWidth + } + }() + } +} diff --git a/vendor/github.com/vbauerster/mpb/progress_posix.go b/vendor/github.com/vbauerster/mpb/progress_posix.go new file mode 100644 index 0000000..545245a --- /dev/null +++ b/vendor/github.com/vbauerster/mpb/progress_posix.go @@ -0,0 +1,70 @@ +// +build !windows + +package mpb + +import ( + "os" + "os/signal" + "syscall" + "time" +) + +func (p *Progress) serve(s *pState) { + + var ticker *time.Ticker + var refreshCh <-chan time.Time + var winch chan os.Signal + var resumeTimer *time.Timer + var resumeEvent <-chan time.Time + winchIdleDur := s.rr * 2 + + if s.manualRefreshCh == nil { + ticker = time.NewTicker(s.rr) + refreshCh = ticker.C + winch = make(chan os.Signal, 2) + signal.Notify(winch, syscall.SIGWINCH) + } else { + refreshCh = s.manualRefreshCh + } + + for { + select { + case op := <-p.operateState: + op(s) + case <-refreshCh: + if s.zeroWait { + if s.manualRefreshCh == nil { + signal.Stop(winch) + ticker.Stop() + } + if s.shutdownNotifier != nil { + close(s.shutdownNotifier) + } + close(p.done) + return + } + tw, err := s.cw.GetWidth() + if err != nil { + tw = s.width + } + s.render(tw) + case <-winch: + tw, err := s.cw.GetWidth() + if err != nil { + tw = s.width + } + s.render(tw - tw/8) + if resumeTimer != nil && resumeTimer.Reset(winchIdleDur) { + break + } + ticker.Stop() + resumeTimer = 
time.NewTimer(winchIdleDur)
+			resumeEvent = resumeTimer.C
+		case <-resumeEvent:
+			ticker = time.NewTicker(s.rr)
+			refreshCh = ticker.C
+			resumeEvent = nil
+			resumeTimer = nil
+		}
+	}
+}
diff --git a/vendor/github.com/vbauerster/mpb/progress_windows.go b/vendor/github.com/vbauerster/mpb/progress_windows.go
new file mode 100644
index 0000000..cab03d3
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/progress_windows.go
@@ -0,0 +1,43 @@
+// +build windows
+
+package mpb
+
+import (
+	"time"
+)
+
+func (p *Progress) serve(s *pState) {
+
+	var ticker *time.Ticker
+	var refreshCh <-chan time.Time
+
+	if s.manualRefreshCh == nil {
+		ticker = time.NewTicker(s.rr)
+		refreshCh = ticker.C
+	} else {
+		refreshCh = s.manualRefreshCh
+	}
+
+	for {
+		select {
+		case op := <-p.operateState:
+			op(s)
+		case <-refreshCh:
+			if s.zeroWait {
+				if s.manualRefreshCh == nil {
+					ticker.Stop()
+				}
+				if s.shutdownNotifier != nil {
+					close(s.shutdownNotifier)
+				}
+				close(p.done)
+				return
+			}
+			tw, err := s.cw.GetWidth()
+			if err != nil {
+				tw = s.width
+			}
+			s.render(tw)
+		}
+	}
+}
diff --git a/vendor/github.com/vbauerster/mpb/proxyreader.go b/vendor/github.com/vbauerster/mpb/proxyreader.go
new file mode 100644
index 0000000..d2692cc
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/proxyreader.go
@@ -0,0 +1,22 @@
+package mpb
+
+import (
+	"io"
+	"time"
+)
+
+// proxyReader is an io.Reader wrapper, which reports the proxied read bytes to its bar
+type proxyReader struct {
+	io.ReadCloser
+	bar *Bar
+	iT  time.Time
+}
+
+func (pr *proxyReader) Read(p []byte) (n int, err error) {
+	n, err = pr.ReadCloser.Read(p)
+	if n > 0 {
+		pr.bar.IncrBy(n, time.Since(pr.iT))
+		pr.iT = time.Now()
+	}
+	return
+}
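+
+// An illustrative sketch of the proxy in action (resp and dst are
+// hypothetical): the bar advances as bytes flow through the wrapped reader.
+//
+//	reader := bar.ProxyReader(resp.Body)
+//	defer reader.Close()
+//	io.Copy(dst, reader)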