From ff814a559d444b66cac4452d2fa9b4f7d67462c3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 26 Nov 2025 09:02:57 +0000 Subject: [PATCH] Bump github.com/google/go-containerregistry from 0.20.6 to 0.20.7 Bumps [github.com/google/go-containerregistry](https://github.com/google/go-containerregistry) from 0.20.6 to 0.20.7. - [Release notes](https://github.com/google/go-containerregistry/releases) - [Changelog](https://github.com/google/go-containerregistry/blob/main/.goreleaser.yml) - [Commits](https://github.com/google/go-containerregistry/compare/v0.20.6...v0.20.7) --- updated-dependencies: - dependency-name: github.com/google/go-containerregistry dependency-version: 0.20.7 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- go.mod | 12 +- go.sum | 28 +-- .../stargz-snapshotter/estargz/build.go | 119 ++++++++++--- .../stargz-snapshotter/estargz/estargz.go | 9 + .../stargz-snapshotter/estargz/gzip.go | 6 +- .../stargz-snapshotter/estargz/testutil.go | 168 +++++++++++------- .../go-containerregistry/pkg/v1/config.go | 9 +- .../pkg/v1/mutate/mutate.go | 2 +- .../pkg/v1/remote/options.go | 9 +- .../pkg/v1/remote/transport/error.go | 6 +- .../klauspost/compress/flate/deflate.go | 49 ++--- .../klauspost/compress/flate/dict_decoder.go | 5 +- .../klauspost/compress/flate/fast_encoder.go | 49 +---- .../compress/flate/huffman_bit_writer.go | 42 ++--- .../klauspost/compress/flate/huffman_code.go | 2 +- .../klauspost/compress/flate/inflate.go | 4 +- .../klauspost/compress/flate/level5.go | 5 +- .../klauspost/compress/flate/stateless.go | 4 +- .../klauspost/compress/fse/bitwriter.go | 2 +- .../klauspost/compress/fse/compress.go | 2 +- .../klauspost/compress/huff0/bitwriter.go | 2 +- .../klauspost/compress/huff0/compress.go | 6 +- .../klauspost/compress/huff0/decompress.go | 14 +- .../compress/huff0/decompress_amd64.go | 7 +- .../klauspost/compress/huff0/huff0.go | 4 +- .../compress/internal/le/unsafe_disabled.go | 4 +- .../compress/internal/le/unsafe_enabled.go | 9 +- .../compress/internal/snapref/decode.go | 2 +- .../compress/internal/snapref/encode.go | 4 +- .../klauspost/compress/zstd/bitwriter.go | 2 +- .../klauspost/compress/zstd/blockdec.go | 6 +- .../klauspost/compress/zstd/decoder.go | 8 +- .../klauspost/compress/zstd/dict.go | 20 +-- .../klauspost/compress/zstd/enc_base.go | 10 +- .../klauspost/compress/zstd/enc_best.go | 23 +-- .../klauspost/compress/zstd/enc_better.go | 30 +--- .../klauspost/compress/zstd/enc_dfast.go | 32 +--- .../klauspost/compress/zstd/enc_fast.go | 30 +--- .../klauspost/compress/zstd/framedec.go | 5 +- .../klauspost/compress/zstd/fse_encoder.go | 2 +- .../klauspost/compress/zstd/seqdec.go | 5 +- .../klauspost/compress/zstd/seqdec_amd64.go | 10 +- .../klauspost/compress/zstd/simple_go124.go | 56 ++++++ .../klauspost/compress/zstd/snappy.go | 2 +- .../github.com/klauspost/compress/zstd/zip.go | 2 +- .../klauspost/compress/zstd/zstd.go | 4 +- .../vbatts/tar-split/archive/tar/common.go | 1 + .../vbatts/tar-split/archive/tar/reader.go | 9 +- vendor/golang.org/x/mod/semver/semver.go | 4 +- vendor/golang.org/x/oauth2/deviceauth.go | 31 +++- vendor/golang.org/x/oauth2/google/google.go | 2 +- vendor/golang.org/x/oauth2/oauth2.go | 2 +- vendor/golang.org/x/oauth2/pkce.go | 2 +- vendor/golang.org/x/oauth2/token.go | 2 +- vendor/golang.org/x/oauth2/transport.go | 2 +- vendor/modules.txt | 18 +- 56 files changed, 466 insertions(+), 438 deletions(-) create mode 
100644 vendor/github.com/klauspost/compress/zstd/simple_go124.go diff --git a/go.mod b/go.mod index b1feea6ae4..7d57fad4e3 100644 --- a/go.mod +++ b/go.mod @@ -12,7 +12,7 @@ require ( github.com/docker/docker v28.5.2+incompatible github.com/fatih/color v1.18.0 github.com/google/go-cmp v0.7.0 - github.com/google/go-containerregistry v0.20.6 + github.com/google/go-containerregistry v0.20.7 github.com/hako/durafmt v0.0.0-20210608085754-5c1018a4e16b github.com/hinshun/vt10x v0.0.0-20220228203356-1ab2cad5fd82 github.com/jonboulle/clockwork v0.5.0 @@ -127,7 +127,7 @@ require ( github.com/cloudevents/sdk-go/v2 v2.16.2 // indirect github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 // indirect github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be // indirect - github.com/containerd/stargz-snapshotter/estargz v0.16.3 // indirect + github.com/containerd/stargz-snapshotter/estargz v0.18.1 // indirect github.com/coreos/go-oidc/v3 v3.14.1 // indirect github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect @@ -236,7 +236,7 @@ require ( github.com/json-iterator/go v1.1.12 // indirect github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect github.com/kelseyhightower/envconfig v1.4.0 // indirect - github.com/klauspost/compress v1.18.0 // indirect + github.com/klauspost/compress v1.18.1 // indirect github.com/ktr0731/go-ansisgr v0.1.0 // indirect github.com/kylelemons/godebug v1.1.0 // indirect github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec // indirect @@ -314,7 +314,7 @@ require ( github.com/transparency-dev/formats v0.0.0-20250421220931-bb8ad4d07c26 // indirect github.com/transparency-dev/merkle v0.0.2 // indirect github.com/transparency-dev/tessera v1.0.0-rc3 // indirect - github.com/vbatts/tar-split v0.12.1 // indirect + github.com/vbatts/tar-split v0.12.2 // indirect github.com/x448/float16 v0.8.4 // indirect github.com/xdg-go/pbkdf2 v1.0.0 // indirect github.com/xdg-go/scram v1.1.2 // indirect @@ -342,9 +342,9 @@ require ( gocloud.dev/pubsub/kafkapubsub v0.43.0 // indirect golang.org/x/crypto v0.45.0 // indirect golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b // indirect - golang.org/x/mod v0.29.0 // indirect + golang.org/x/mod v0.30.0 // indirect golang.org/x/net v0.47.0 // indirect - golang.org/x/oauth2 v0.32.0 // indirect + golang.org/x/oauth2 v0.33.0 // indirect golang.org/x/sync v0.18.0 // indirect golang.org/x/sys v0.38.0 // indirect golang.org/x/text v0.31.0 // indirect diff --git a/go.sum b/go.sum index 4e78f22625..57ae2f6d61 100644 --- a/go.sum +++ b/go.sum @@ -909,8 +909,8 @@ github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb h1:EDmT6Q9Zs+SbUo github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4= github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be h1:J5BL2kskAlV9ckgEsNQXscjIaLiOYiZ75d4e94E6dcQ= github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be/go.mod h1:mk5IQ+Y0ZeO87b858TlA645sVcEcbiX6YqP98kt+7+w= -github.com/containerd/stargz-snapshotter/estargz v0.16.3 h1:7evrXtoh1mSbGj/pfRccTampEyKpjpOnS3CyiV1Ebr8= -github.com/containerd/stargz-snapshotter/estargz v0.16.3/go.mod h1:uyr4BfYfOj3G9WBVE8cOlQmXAbPN9VEQpBBeJIuOipU= +github.com/containerd/stargz-snapshotter/estargz v0.18.1 h1:cy2/lpgBXDA3cDKSyEfNOFMA/c10O1axL69EU7iirO8= +github.com/containerd/stargz-snapshotter/estargz v0.18.1/go.mod 
h1:ALIEqa7B6oVDsrF37GkGN20SuvG/pIMm7FwP7ZmRb0Q= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= @@ -1260,8 +1260,8 @@ github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= -github.com/google/go-containerregistry v0.20.6 h1:cvWX87UxxLgaH76b4hIvya6Dzz9qHB31qAwjAohdSTU= -github.com/google/go-containerregistry v0.20.6/go.mod h1:T0x8MuoAoKX/873bkeSfLD2FAkwCDf9/HZgsFJ02E2Y= +github.com/google/go-containerregistry v0.20.7 h1:24VGNpS0IwrOZ2ms2P1QE3Xa5X9p4phx0aUgzYzHW6I= +github.com/google/go-containerregistry v0.20.7/go.mod h1:Lx5LCZQjLH1QBaMPeGwsME9biPeo1lPx6lbGj/UmzgM= github.com/google/go-containerregistry/pkg/authn/k8schain v0.0.0-20240108195214-a0658aa1d0cc h1:eJ9J17+23quNw5z6O9AdTH+irI7JI+6eQX9TswViyvk= github.com/google/go-containerregistry/pkg/authn/k8schain v0.0.0-20240108195214-a0658aa1d0cc/go.mod h1:Ek+8PQrShkA7aHEj3/zSW33wU0V/Bx3zW/gFh7l21xY= github.com/google/go-containerregistry/pkg/authn/kubernetes v0.0.0-20240108195214-a0658aa1d0cc h1:fHDosK/RhxYQpWBRo+bbawVuR402odSaNToA0Pp+ojw= @@ -1530,8 +1530,8 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= -github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= -github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= +github.com/klauspost/compress v1.18.1 h1:bcSGx7UbpBqMChDtsF28Lw6v/G94LPrrbMbdC3JH2co= +github.com/klauspost/compress v1.18.1/go.mod h1:ZQFFVG+MdnR0P+l6wpXgIL4NTtwiKIdBnrBd8Nrxr+0= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -1968,8 +1968,8 @@ github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljT github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI= github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= -github.com/vbatts/tar-split v0.12.1 h1:CqKoORW7BUWBe7UL/iqTVvkTBOF8UvOMKOIZykxnnbo= -github.com/vbatts/tar-split v0.12.1/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA= +github.com/vbatts/tar-split v0.12.2 h1:w/Y6tjxpeiFMR47yzZPlPj/FcPLpXbTUi/9H7d3CPa4= +github.com/vbatts/tar-split v0.12.2/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA= github.com/vdemeester/cr-20160607 v1.0.1 h1:nHyI7BZNR04QFtgItJFVAr8SLeoVIFd8co+DODxnPKE= github.com/vdemeester/cr-20160607 v1.0.1/go.mod h1:QHeKZtZ3F3FOE+/uIXCBAp8POwnUYekpLwr1dtQa5r0= github.com/wavesoftware/go-ensure v1.0.0/go.mod 
h1:K2UAFSwMTvpiRGay/M3aEYYuurcR8S4A6HkQlJPV8k4= @@ -2198,8 +2198,8 @@ golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA= -golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w= +golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk= +golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -2310,8 +2310,8 @@ golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I= golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= -golang.org/x/oauth2 v0.32.0 h1:jsCblLleRMDrxMN29H3z/k1KliIvpLgCkE6R8FXXNgY= -golang.org/x/oauth2 v0.32.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= +golang.org/x/oauth2 v0.33.0 h1:4Q+qn+E5z8gPRJfmRy7C2gGG3T4jIprK6aSYgTXGRpo= +golang.org/x/oauth2 v0.33.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -2574,8 +2574,8 @@ golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= -golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ= -golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs= +golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ= +golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go index 6aba0ef1f6..a9e1b72ba7 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go @@ -35,6 +35,7 @@ import ( "runtime" "strings" "sync" + "sync/atomic" "github.com/containerd/stargz-snapshotter/estargz/errorutil" "github.com/klauspost/compress/zstd" @@ -42,6 +43,8 @@ import ( 
"golang.org/x/sync/errgroup" ) +type GzipHelperFunc func(io.Reader) (io.ReadCloser, error) + type options struct { chunkSize int compressionLevel int @@ -50,6 +53,7 @@ type options struct { compression Compression ctx context.Context minChunkSize int + gzipHelperFunc GzipHelperFunc } type Option func(o *options) error @@ -127,11 +131,25 @@ func WithMinChunkSize(minChunkSize int) Option { } } +// WithGzipHelperFunc option specifies a custom function to decompress gzip-compressed layers. +// When a gzip-compressed layer is detected, this function will be used instead of the +// Go standard library gzip decompression for better performance. +// The function should take an io.Reader as input and return an io.ReadCloser. +// If nil, the Go standard library gzip.NewReader will be used. +func WithGzipHelperFunc(gzipHelperFunc GzipHelperFunc) Option { + return func(o *options) error { + o.gzipHelperFunc = gzipHelperFunc + return nil + } +} + // Blob is an eStargz blob. type Blob struct { io.ReadCloser - diffID digest.Digester - tocDigest digest.Digest + diffID digest.Digester + tocDigest digest.Digest + readCompleted *atomic.Bool + uncompressedSize *atomic.Int64 } // DiffID returns the digest of uncompressed blob. @@ -145,6 +163,19 @@ func (b *Blob) TOCDigest() digest.Digest { return b.tocDigest } +// UncompressedSize returns the size of uncompressed blob. +// UncompressedSize should only be called after the blob has been fully read. +func (b *Blob) UncompressedSize() (int64, error) { + switch { + case b.uncompressedSize == nil || b.readCompleted == nil: + return -1, fmt.Errorf("readCompleted or uncompressedSize is not initialized") + case !b.readCompleted.Load(): + return -1, fmt.Errorf("called UncompressedSize before the blob has been fully read") + default: + return b.uncompressedSize.Load(), nil + } +} + // Build builds an eStargz blob which is an extended version of stargz, from a blob (gzip, zstd // or plain tar) passed through the argument. 
If some prioritized files are listed in // the option, these files are grouped as "prioritized" and can be used for runtime optimization @@ -186,7 +217,7 @@ func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) { rErr = fmt.Errorf("error from context %q: %w", cErr, rErr) } }() - tarBlob, err := decompressBlob(tarBlob, layerFiles) + tarBlob, err := decompressBlob(tarBlob, layerFiles, opts.gzipHelperFunc) if err != nil { return nil, err } @@ -252,17 +283,28 @@ func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) { } diffID := digest.Canonical.Digester() pr, pw := io.Pipe() + readCompleted := new(atomic.Bool) + uncompressedSize := new(atomic.Int64) go func() { - r, err := opts.compression.Reader(io.TeeReader(io.MultiReader(append(rs, tocAndFooter)...), pw)) + var size int64 + var decompressFunc func(io.Reader) (io.ReadCloser, error) + if _, ok := opts.compression.(*gzipCompression); ok && opts.gzipHelperFunc != nil { + decompressFunc = opts.gzipHelperFunc + } else { + decompressFunc = opts.compression.Reader + } + decompressR, err := decompressFunc(io.TeeReader(io.MultiReader(append(rs, tocAndFooter)...), pw)) if err != nil { pw.CloseWithError(err) return } - defer r.Close() - if _, err := io.Copy(diffID.Hash(), r); err != nil { + defer decompressR.Close() + if size, err = io.Copy(diffID.Hash(), decompressR); err != nil { pw.CloseWithError(err) return } + uncompressedSize.Store(size) + readCompleted.Store(true) pw.Close() }() return &Blob{ @@ -270,8 +312,10 @@ func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) { Reader: pr, closeFunc: layerFiles.CleanupAll, }, - tocDigest: tocDgst, - diffID: diffID, + tocDigest: tocDgst, + diffID: diffID, + readCompleted: readCompleted, + uncompressedSize: uncompressedSize, }, nil } @@ -366,8 +410,9 @@ func sortEntries(in io.ReaderAt, prioritized []string, missedPrioritized *[]stri // Sort the tar file respecting to the prioritized files list. sorted := &tarFile{} + picked := make(map[string]struct{}) for _, l := range prioritized { - if err := moveRec(l, intar, sorted); err != nil { + if err := moveRec(l, intar, sorted, picked); err != nil { if errors.Is(err, errNotFound) && missedPrioritized != nil { *missedPrioritized = append(*missedPrioritized, l) continue // allow not found @@ -395,8 +440,8 @@ func sortEntries(in io.ReaderAt, prioritized []string, missedPrioritized *[]stri }) } - // Dump all entry and concatinate them. - return append(sorted.dump(), intar.dump()...), nil + // Dump prioritized entries followed by the remaining entries, skipping picked ones.
+ return append(sorted.dump(nil), intar.dump(picked)...), nil } // readerFromEntries returns a reader of tar archive that contains entries passed @@ -408,11 +453,11 @@ func readerFromEntries(entries ...*entry) io.Reader { defer tw.Close() for _, entry := range entries { if err := tw.WriteHeader(entry.header); err != nil { - pw.CloseWithError(fmt.Errorf("Failed to write tar header: %v", err)) + pw.CloseWithError(fmt.Errorf("failed to write tar header: %v", err)) return } if _, err := io.Copy(tw, entry.payload); err != nil { - pw.CloseWithError(fmt.Errorf("Failed to write tar payload: %v", err)) + pw.CloseWithError(fmt.Errorf("failed to write tar payload: %v", err)) return } } @@ -458,36 +503,42 @@ func importTar(in io.ReaderAt) (*tarFile, error) { return tf, nil } -func moveRec(name string, in *tarFile, out *tarFile) error { +func moveRec(name string, in *tarFile, out *tarFile, picked map[string]struct{}) error { name = cleanEntryName(name) if name == "" { // root directory. stop recursion. if e, ok := in.get(name); ok { // entry of the root directory exists. we should move it as well. // this case will occur if tar entries are prefixed with "./", "/", etc. - out.add(e) - in.remove(name) + if _, done := picked[name]; !done { + out.add(e) + picked[name] = struct{}{} + } } return nil } _, okIn := in.get(name) _, okOut := out.get(name) - if !okIn && !okOut { + _, okPicked := picked[name] + if !okIn && !okOut && !okPicked { return fmt.Errorf("file: %q: %w", name, errNotFound) } parent, _ := path.Split(strings.TrimSuffix(name, "/")) - if err := moveRec(parent, in, out); err != nil { + if err := moveRec(parent, in, out, picked); err != nil { return err } if e, ok := in.get(name); ok && e.header.Typeflag == tar.TypeLink { - if err := moveRec(e.header.Linkname, in, out); err != nil { + if err := moveRec(e.header.Linkname, in, out, picked); err != nil { return err } } + if _, done := picked[name]; done { + return nil + } if e, ok := in.get(name); ok { out.add(e) - in.remove(name) + picked[name] = struct{}{} } return nil } @@ -533,8 +584,18 @@ func (f *tarFile) get(name string) (e *entry, ok bool) { return } -func (f *tarFile) dump() []*entry { - return f.stream +func (f *tarFile) dump(skip map[string]struct{}) []*entry { + if len(skip) == 0 { + return f.stream + } + var out []*entry + for _, e := range f.stream { + if _, ok := skip[cleanEntryName(e.header.Name)]; ok { + continue + } + out = append(out, e) + } + return out } type readCloser struct { @@ -627,12 +688,12 @@ func (cr *countReadSeeker) Seek(offset int64, whence int) (int64, error) { switch whence { default: - return 0, fmt.Errorf("Unknown whence: %v", whence) + return 0, fmt.Errorf("unknown whence: %v", whence) case io.SeekStart: case io.SeekCurrent: offset += *cr.cPos case io.SeekEnd: - return 0, fmt.Errorf("Unsupported whence: %v", whence) + return 0, fmt.Errorf("unsupported whence: %v", whence) } if offset < 0 { @@ -649,7 +710,7 @@ func (cr *countReadSeeker) currentPos() int64 { return *cr.cPos } -func decompressBlob(org *io.SectionReader, tmp *tempFiles) (*io.SectionReader, error) { +func decompressBlob(org *io.SectionReader, tmp *tempFiles, gzipHelperFunc GzipHelperFunc) (*io.SectionReader, error) { if org.Size() < 4 { return org, nil } @@ -660,7 +721,13 @@ func decompressBlob(org *io.SectionReader, tmp *tempFiles) (*io.SectionReader, e var dR io.Reader if bytes.Equal([]byte{0x1F, 0x8B, 0x08}, src[:3]) { // gzip - dgR, err := gzip.NewReader(io.NewSectionReader(org, 0, org.Size())) + var dgR io.ReadCloser + var err error + if 
gzipHelperFunc != nil { + dgR, err = gzipHelperFunc(io.NewSectionReader(org, 0, org.Size())) + } else { + dgR, err = gzip.NewReader(io.NewSectionReader(org, 0, org.Size())) + } if err != nil { return nil, err } diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go index f4d5546558..ff91a37add 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go @@ -307,6 +307,15 @@ func (r *Reader) initFields() error { } } + if len(r.m) == 0 { + r.m[""] = &TOCEntry{ + Name: "", + Type: "dir", + Mode: 0755, + NumLink: 1, + } + } + return nil } diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go index f24afe32f4..88fa13b191 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go @@ -109,7 +109,7 @@ func gzipFooterBytes(tocOff int64) []byte { header[0], header[1] = 'S', 'G' subfield := fmt.Sprintf("%016xSTARGZ", tocOff) binary.LittleEndian.PutUint16(header[2:4], uint16(len(subfield))) // little-endian per RFC1952 - gz.Header.Extra = append(header, []byte(subfield)...) + gz.Extra = append(header, []byte(subfield)...) gz.Close() if buf.Len() != FooterSize { panic(fmt.Sprintf("footer buffer = %d, not %d", buf.Len(), FooterSize)) } @@ -136,7 +136,7 @@ func (gz *GzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOffset, t return 0, 0, 0, err } defer zr.Close() - extra := zr.Header.Extra + extra := zr.Extra si1, si2, subfieldlen, subfield := extra[0], extra[1], extra[2:4], extra[4:] if si1 != 'S' || si2 != 'G' { return 0, 0, 0, fmt.Errorf("invalid subfield IDs: %q, %q; want E, S", si1, si2) } @@ -181,7 +181,7 @@ func (gz *LegacyGzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOff return 0, 0, 0, fmt.Errorf("legacy: failed to get footer gzip reader: %w", err) } defer zr.Close() - extra := zr.Header.Extra + extra := zr.Extra if len(extra) != 16+len("STARGZ") { return 0, 0, 0, fmt.Errorf("legacy: invalid stargz's extra field size") } diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go index ba650b4d1d..ff165e090e 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go @@ -38,7 +38,6 @@ import ( "reflect" "sort" "strings" - "testing" "time" "github.com/containerd/stargz-snapshotter/estargz/errorutil" @@ -49,16 +48,48 @@ import ( // TestingController is Compression with some helper methods necessary for testing. type TestingController interface { Compression - TestStreams(t *testing.T, b []byte, streams []int64) - DiffIDOf(*testing.T, []byte) string + TestStreams(t TestingT, b []byte, streams []int64) + DiffIDOf(TestingT, []byte) string String() string } +// TestingT is the minimal set of testing.T required to run the +// tests defined in CompressionTestSuite. This interface exists to prevent +// the testing package from being exposed outside tests. +type TestingT interface { + Errorf(format string, args ...any) + FailNow() + Failed() bool + Fatal(args ...any) + Fatalf(format string, args ...any) + Logf(format string, args ...any) + Parallel() +} + +// Runner allows running subtests of TestingT.
This exists instead of adding +// a Run method to TestingT interface because the Run implementation of +// testing.T would not satisfy the interface. +type Runner func(t TestingT, name string, fn func(t TestingT)) + +type TestRunner struct { + TestingT + Runner Runner +} + +func (r *TestRunner) Run(name string, run func(*TestRunner)) { + r.Runner(r.TestingT, name, func(t TestingT) { + run(&TestRunner{TestingT: t, Runner: r.Runner}) + }) +} + // CompressionTestSuite tests this pkg with controllers can build valid eStargz blobs and parse them. -func CompressionTestSuite(t *testing.T, controllers ...TestingControllerFactory) { - t.Run("testBuild", func(t *testing.T) { t.Parallel(); testBuild(t, controllers...) }) - t.Run("testDigestAndVerify", func(t *testing.T) { t.Parallel(); testDigestAndVerify(t, controllers...) }) - t.Run("testWriteAndOpen", func(t *testing.T) { t.Parallel(); testWriteAndOpen(t, controllers...) }) +func CompressionTestSuite(t *TestRunner, controllers ...TestingControllerFactory) { + t.Run("testBuild", func(t *TestRunner) { t.Parallel(); testBuild(t, controllers...) }) + t.Run("testDigestAndVerify", func(t *TestRunner) { + t.Parallel() + testDigestAndVerify(t, controllers...) + }) + t.Run("testWriteAndOpen", func(t *TestRunner) { t.Parallel(); testWriteAndOpen(t, controllers...) }) } type TestingControllerFactory func() TestingController @@ -79,7 +110,7 @@ var allowedPrefix = [4]string{"", "./", "/", "../"} // testBuild tests the resulting stargz blob built by this pkg has the same // contents as the normal stargz blob. -func testBuild(t *testing.T, controllers ...TestingControllerFactory) { +func testBuild(t *TestRunner, controllers ...TestingControllerFactory) { tests := []struct { name string chunkSize int @@ -165,7 +196,7 @@ func testBuild(t *testing.T, controllers ...TestingControllerFactory) { prefix := prefix for _, minChunkSize := range tt.minChunkSize { minChunkSize := minChunkSize - t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,src=%d,format=%s,minChunkSize=%d", newCL(), prefix, srcCompression, srcTarFormat, minChunkSize), func(t *testing.T) { + t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,src=%d,format=%s,minChunkSize=%d", newCL(), prefix, srcCompression, srcTarFormat, minChunkSize), func(t *TestRunner) { tarBlob := buildTar(t, tt.in, prefix, srcTarFormat) // Test divideEntries() entries, err := sortEntries(tarBlob, nil, nil) // identical order @@ -265,7 +296,7 @@ func testBuild(t *testing.T, controllers ...TestingControllerFactory) { } } -func isSameTarGz(t *testing.T, cla TestingController, a []byte, clb TestingController, b []byte) bool { +func isSameTarGz(t TestingT, cla TestingController, a []byte, clb TestingController, b []byte) bool { aGz, err := cla.Reader(bytes.NewReader(a)) if err != nil { t.Fatalf("failed to read A") @@ -325,7 +356,7 @@ func isSameTarGz(t *testing.T, cla TestingController, a []byte, clb TestingContr return true } -func isSameVersion(t *testing.T, cla TestingController, a []byte, clb TestingController, b []byte) bool { +func isSameVersion(t TestingT, cla TestingController, a []byte, clb TestingController, b []byte) bool { aJTOC, _, err := parseStargz(io.NewSectionReader(bytes.NewReader(a), 0, int64(len(a))), cla) if err != nil { t.Fatalf("failed to parse A: %v", err) @@ -339,7 +370,7 @@ func isSameVersion(t *testing.T, cla TestingController, a []byte, clb TestingCon return aJTOC.Version == bJTOC.Version } -func isSameEntries(t *testing.T, a, b *Reader) bool { +func isSameEntries(t TestingT, a, b *Reader) bool { 
aroot, ok := a.Lookup("") if !ok { t.Fatalf("failed to get root of A") @@ -353,18 +384,19 @@ func isSameEntries(t *testing.T, a, b *Reader) bool { return contains(t, aEntry, bEntry) && contains(t, bEntry, aEntry) } -func compressBlob(t *testing.T, src *io.SectionReader, srcCompression int) *io.SectionReader { +func compressBlob(t TestingT, src *io.SectionReader, srcCompression int) *io.SectionReader { buf := new(bytes.Buffer) var w io.WriteCloser var err error - if srcCompression == gzipType { + switch srcCompression { + case gzipType: w = gzip.NewWriter(buf) - } else if srcCompression == zstdType { + case zstdType: w, err = zstd.NewWriter(buf) if err != nil { t.Fatalf("failed to init zstd writer: %v", err) } - } else { + default: return src } src.Seek(0, io.SeekStart) @@ -386,7 +418,7 @@ type stargzEntry struct { // contains checks if all child entries in "b" are also contained in "a". // This function also checks if the files/chunks contain the same contents among "a" and "b". -func contains(t *testing.T, a, b stargzEntry) bool { +func contains(t TestingT, a, b stargzEntry) bool { ae, ar := a.e, a.r be, br := b.e, b.r t.Logf("Comparing: %q vs %q", ae.Name, be.Name) @@ -445,7 +477,7 @@ func contains(t *testing.T, a, b stargzEntry) bool { bbytes, bnext, bok := readOffset(t, bf, nr, b) if !aok && !bok { break - } else if !(aok && bok) || anext != bnext { + } else if !aok || !bok || anext != bnext { t.Logf("%q != %q (offset=%d): chunk existence a=%v vs b=%v, anext=%v vs bnext=%v", ae.Name, be.Name, nr, aok, bok, anext, bnext) return false @@ -497,7 +529,7 @@ func equalEntry(a, b *TOCEntry) bool { a.Digest == b.Digest } -func readOffset(t *testing.T, r *io.SectionReader, offset int64, e stargzEntry) ([]byte, int64, bool) { +func readOffset(t TestingT, r *io.SectionReader, offset int64, e stargzEntry) ([]byte, int64, bool) { ce, ok := e.r.ChunkEntryForOffset(e.e.Name, offset) if !ok { return nil, 0, false @@ -516,7 +548,7 @@ func readOffset(t *testing.T, r *io.SectionReader, offset int64, e stargzEntry) return data[:n], offset + ce.ChunkSize, true } -func dumpTOCJSON(t *testing.T, tocJSON *JTOC) string { +func dumpTOCJSON(t TestingT, tocJSON *JTOC) string { jtocData, err := json.Marshal(*tocJSON) if err != nil { t.Fatalf("failed to marshal TOC JSON: %v", err) @@ -530,20 +562,19 @@ func dumpTOCJSON(t *testing.T, tocJSON *JTOC) string { const chunkSize = 3 -// type check func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, compressionLevel int) -type check func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) +type check func(t *TestRunner, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) // testDigestAndVerify runs specified checks against sample stargz blobs. 
-func testDigestAndVerify(t *testing.T, controllers ...TestingControllerFactory) { +func testDigestAndVerify(t *TestRunner, controllers ...TestingControllerFactory) { tests := []struct { name string - tarInit func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) + tarInit func(t TestingT, dgstMap map[string]digest.Digest) (blob []tarEntry) checks []check minChunkSize []int }{ { name: "no-regfile", - tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) { + tarInit: func(t TestingT, dgstMap map[string]digest.Digest) (blob []tarEntry) { return tarOf( dir("test/"), ) @@ -558,7 +589,7 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingControllerFactory) }, { name: "small-files", - tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) { + tarInit: func(t TestingT, dgstMap map[string]digest.Digest) (blob []tarEntry) { return tarOf( regDigest(t, "baz.txt", "", dgstMap), regDigest(t, "foo.txt", "a", dgstMap), @@ -582,7 +613,7 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingControllerFactory) }, { name: "big-files", - tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) { + tarInit: func(t TestingT, dgstMap map[string]digest.Digest) (blob []tarEntry) { return tarOf( regDigest(t, "baz.txt", "bazbazbazbazbazbazbaz", dgstMap), regDigest(t, "foo.txt", "a", dgstMap), @@ -606,7 +637,7 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingControllerFactory) { name: "with-non-regfiles", minChunkSize: []int{0, 64000}, - tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) { + tarInit: func(t TestingT, dgstMap map[string]digest.Digest) (blob []tarEntry) { return tarOf( regDigest(t, "baz.txt", "bazbazbazbazbazbazbaz", dgstMap), regDigest(t, "foo.txt", "a", dgstMap), @@ -653,7 +684,7 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingControllerFactory) srcTarFormat := srcTarFormat for _, minChunkSize := range tt.minChunkSize { minChunkSize := minChunkSize - t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,format=%s,minChunkSize=%d", newCL(), prefix, srcTarFormat, minChunkSize), func(t *testing.T) { + t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,format=%s,minChunkSize=%d", newCL(), prefix, srcTarFormat, minChunkSize), func(t *TestRunner) { // Get original tar file and chunk digests dgstMap := make(map[string]digest.Digest) tarBlob := buildTar(t, tt.tarInit(t, dgstMap), prefix, srcTarFormat) @@ -689,7 +720,7 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingControllerFactory) // checkStargzTOC checks the TOC JSON of the passed stargz has the expected // digest and contains valid chunks. It walks all entries in the stargz and // checks all chunk digests stored to the TOC JSON match the actual contents. -func checkStargzTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) { +func checkStargzTOC(t *TestRunner, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) { sgz, err := Open( io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))), WithDecompressors(controller), @@ -800,7 +831,7 @@ func checkStargzTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstM // checkVerifyTOC checks the verification works for the TOC JSON of the passed // stargz. 
It walks all entries in the stargz and checks the verifications for // all chunks work. -func checkVerifyTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) { +func checkVerifyTOC(t *TestRunner, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) { sgz, err := Open( io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))), WithDecompressors(controller), @@ -881,9 +912,9 @@ func checkVerifyTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstM // checkVerifyInvalidTOCEntryFail checks if misconfigured TOC JSON can be // detected during the verification and the verification returns an error. func checkVerifyInvalidTOCEntryFail(filename string) check { - return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) { + return func(t *TestRunner, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) { funcs := map[string]rewriteFunc{ - "lost digest in a entry": func(t *testing.T, toc *JTOC, sgz *io.SectionReader) { + "lost digest in a entry": func(t TestingT, toc *JTOC, sgz *io.SectionReader) { var found bool for _, e := range toc.Entries { if cleanEntryName(e.Name) == filename { @@ -901,7 +932,7 @@ func checkVerifyInvalidTOCEntryFail(filename string) check { t.Fatalf("rewrite target not found") } }, - "duplicated entry offset": func(t *testing.T, toc *JTOC, sgz *io.SectionReader) { + "duplicated entry offset": func(t TestingT, toc *JTOC, sgz *io.SectionReader) { var ( sampleEntry *TOCEntry targetEntry *TOCEntry @@ -928,7 +959,7 @@ func checkVerifyInvalidTOCEntryFail(filename string) check { } for name, rFunc := range funcs { - t.Run(name, func(t *testing.T) { + t.Run(name, func(t *TestRunner) { newSgz, newTocDigest := rewriteTOCJSON(t, io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))), rFunc, controller) buf := new(bytes.Buffer) if _, err := io.Copy(buf, newSgz); err != nil { @@ -957,7 +988,7 @@ func checkVerifyInvalidTOCEntryFail(filename string) check { // checkVerifyInvalidStargzFail checks if the verification detects that the // given stargz file doesn't match to the expected digest and returns error. func checkVerifyInvalidStargzFail(invalid *io.SectionReader) check { - return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) { + return func(t *TestRunner, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) { cl := newController() rc, err := Build(invalid, WithChunkSize(chunkSize), WithCompression(cl)) if err != nil { @@ -989,7 +1020,7 @@ func checkVerifyInvalidStargzFail(invalid *io.SectionReader) check { // checkVerifyBrokenContentFail checks if the verifier detects broken contents // that doesn't match to the expected digest and returns error. 
func checkVerifyBrokenContentFail(filename string) check { - return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) { + return func(t *TestRunner, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) { // Parse stargz file sgz, err := Open( io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))), @@ -1046,9 +1077,9 @@ func chunkID(name string, offset, size int64) string { return fmt.Sprintf("%s-%d-%d", cleanEntryName(name), offset, size) } -type rewriteFunc func(t *testing.T, toc *JTOC, sgz *io.SectionReader) +type rewriteFunc func(t TestingT, toc *JTOC, sgz *io.SectionReader) -func rewriteTOCJSON(t *testing.T, sgz *io.SectionReader, rewrite rewriteFunc, controller TestingController) (newSgz io.Reader, tocDigest digest.Digest) { +func rewriteTOCJSON(t TestingT, sgz *io.SectionReader, rewrite rewriteFunc, controller TestingController) (newSgz io.Reader, tocDigest digest.Digest) { decodedJTOC, jtocOffset, err := parseStargz(sgz, controller) if err != nil { t.Fatalf("failed to extract TOC JSON: %v", err) } @@ -1119,7 +1150,7 @@ func parseStargz(sgz *io.SectionReader, controller TestingController) (decodedJT return decodedJTOC, tocOffset, nil } -func testWriteAndOpen(t *testing.T, controllers ...TestingControllerFactory) { +func testWriteAndOpen(t *TestRunner, controllers ...TestingControllerFactory) { const content = "Some contents" invalidUtf8 := "\xff\xfe\xfd" @@ -1463,7 +1494,7 @@ func testWriteAndOpen(t *testing.T, controllers ...TestingControllerFactory) { for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} { srcTarFormat := srcTarFormat for _, lossless := range []bool{true, false} { - t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,lossless=%v,format=%s", newCL(), prefix, lossless, srcTarFormat), func(t *testing.T) { + t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,lossless=%v,format=%s", newCL(), prefix, lossless, srcTarFormat), func(t *TestRunner) { var tr io.Reader = buildTar(t, tt.in, prefix, srcTarFormat) origTarDgstr := digest.Canonical.Digester() tr = io.TeeReader(tr, origTarDgstr.Hash()) @@ -1529,6 +1560,9 @@ func testWriteAndOpen(t *testing.T, controllers ...TestingControllerFactory) { if err != nil { t.Fatalf("stargz.Open: %v", err) } + if _, ok := r.Lookup(""); !ok { + t.Fatalf("failed to lookup rootdir") + } wantTOCVersion := 1 if tt.wantTOCVersion > 0 { wantTOCVersion = tt.wantTOCVersion } @@ -1627,7 +1661,7 @@ func digestFor(content string) string { type numTOCEntries int -func (n numTOCEntries) check(t *testing.T, r *Reader) { +func (n numTOCEntries) check(t TestingT, r *Reader) { if r.toc == nil { t.Fatal("nil TOC") } @@ -1647,15 +1681,15 @@ func (n numTOCEntries) check(t *testing.T, r *Reader) { func checks(s ...stargzCheck) []stargzCheck { return s } type stargzCheck interface { - check(t *testing.T, r *Reader) + check(t TestingT, r *Reader) } -type stargzCheckFn func(*testing.T, *Reader) +type stargzCheckFn func(TestingT, *Reader) -func (f stargzCheckFn) check(t *testing.T, r *Reader) { f(t, r) } +func (f stargzCheckFn) check(t TestingT, r *Reader) { f(t, r) } func maxDepth(max int) stargzCheck { - return stargzCheckFn(func(t *testing.T, r *Reader) { + return stargzCheckFn(func(t TestingT, r *Reader) { e, ok := r.Lookup("") if !ok { t.Fatal("root directory not found") } @@ -1672,7 +1706,7 @@
func maxDepth(max int) stargzCheck { }) } -func getMaxDepth(t *testing.T, e *TOCEntry, current, limit int) (max int, rErr error) { +func getMaxDepth(t TestingT, e *TOCEntry, current, limit int) (max int, rErr error) { if current > limit { return -1, fmt.Errorf("walkMaxDepth: exceeds limit: current:%d > limit:%d", current, limit) @@ -1694,7 +1728,7 @@ func getMaxDepth(t *testing.T, e *TOCEntry, current, limit int) (max int, rErr e } func hasFileLen(file string, wantLen int) stargzCheck { - return stargzCheckFn(func(t *testing.T, r *Reader) { + return stargzCheckFn(func(t TestingT, r *Reader) { for _, ent := range r.toc.Entries { if ent.Name == file { if ent.Type != "reg" { @@ -1710,7 +1744,7 @@ func hasFileLen(file string, wantLen int) stargzCheck { } func hasFileXattrs(file, name, value string) stargzCheck { - return stargzCheckFn(func(t *testing.T, r *Reader) { + return stargzCheckFn(func(t TestingT, r *Reader) { for _, ent := range r.toc.Entries { if ent.Name == file { if ent.Type != "reg" { @@ -1737,7 +1771,7 @@ func hasFileXattrs(file, name, value string) stargzCheck { } func hasFileDigest(file string, digest string) stargzCheck { - return stargzCheckFn(func(t *testing.T, r *Reader) { + return stargzCheckFn(func(t TestingT, r *Reader) { ent, ok := r.Lookup(file) if !ok { t.Fatalf("didn't find TOCEntry for file %q", file) @@ -1749,7 +1783,7 @@ func hasFileDigest(file string, digest string) stargzCheck { } func hasFileContentsWithPreRead(file string, offset int, want string, extra ...chunkInfo) stargzCheck { - return stargzCheckFn(func(t *testing.T, r *Reader) { + return stargzCheckFn(func(t TestingT, r *Reader) { extraMap := make(map[string]chunkInfo) for _, e := range extra { extraMap[e.name] = e @@ -1796,7 +1830,7 @@ func hasFileContentsWithPreRead(file string, offset int, want string, extra ...c } func hasFileContentsRange(file string, offset int, want string) stargzCheck { - return stargzCheckFn(func(t *testing.T, r *Reader) { + return stargzCheckFn(func(t TestingT, r *Reader) { f, err := r.OpenFile(file) if err != nil { t.Fatal(err) @@ -1813,7 +1847,7 @@ func hasFileContentsRange(file string, offset int, want string) stargzCheck { } func hasChunkEntries(file string, wantChunks int) stargzCheck { - return stargzCheckFn(func(t *testing.T, r *Reader) { + return stargzCheckFn(func(t TestingT, r *Reader) { ent, ok := r.Lookup(file) if !ok { t.Fatalf("no file for %q", file) @@ -1857,7 +1891,7 @@ func hasChunkEntries(file string, wantChunks int) stargzCheck { } func entryHasChildren(dir string, want ...string) stargzCheck { - return stargzCheckFn(func(t *testing.T, r *Reader) { + return stargzCheckFn(func(t TestingT, r *Reader) { want := append([]string(nil), want...) 
var got []string ent, ok := r.Lookup(dir) @@ -1876,7 +1910,7 @@ func entryHasChildren(dir string, want ...string) stargzCheck { } func hasDir(file string) stargzCheck { - return stargzCheckFn(func(t *testing.T, r *Reader) { + return stargzCheckFn(func(t TestingT, r *Reader) { for _, ent := range r.toc.Entries { if ent.Name == cleanEntryName(file) { if ent.Type != "dir" { @@ -1890,7 +1924,7 @@ func hasDir(file string) stargzCheck { } func hasDirLinkCount(file string, count int) stargzCheck { - return stargzCheckFn(func(t *testing.T, r *Reader) { + return stargzCheckFn(func(t TestingT, r *Reader) { for _, ent := range r.toc.Entries { if ent.Name == cleanEntryName(file) { if ent.Type != "dir" { @@ -1908,7 +1942,7 @@ func hasDirLinkCount(file string, count int) stargzCheck { } func hasMode(file string, mode os.FileMode) stargzCheck { - return stargzCheckFn(func(t *testing.T, r *Reader) { + return stargzCheckFn(func(t TestingT, r *Reader) { for _, ent := range r.toc.Entries { if ent.Name == cleanEntryName(file) { if ent.Stat().Mode() != mode { @@ -1923,7 +1957,7 @@ func hasMode(file string, mode os.FileMode) stargzCheck { } func hasSymlink(file, target string) stargzCheck { - return stargzCheckFn(func(t *testing.T, r *Reader) { + return stargzCheckFn(func(t TestingT, r *Reader) { for _, ent := range r.toc.Entries { if ent.Name == file { if ent.Type != "symlink" { @@ -1939,7 +1973,7 @@ func hasSymlink(file, target string) stargzCheck { } func lookupMatch(name string, want *TOCEntry) stargzCheck { - return stargzCheckFn(func(t *testing.T, r *Reader) { + return stargzCheckFn(func(t TestingT, r *Reader) { e, ok := r.Lookup(name) if !ok { t.Fatalf("failed to Lookup entry %q", name) @@ -1952,7 +1986,7 @@ func lookupMatch(name string, want *TOCEntry) stargzCheck { } func hasEntryOwner(entry string, owner owner) stargzCheck { - return stargzCheckFn(func(t *testing.T, r *Reader) { + return stargzCheckFn(func(t TestingT, r *Reader) { ent, ok := r.Lookup(strings.TrimSuffix(entry, "/")) if !ok { t.Errorf("entry %q not found", entry) @@ -1966,7 +2000,7 @@ func hasEntryOwner(entry string, owner owner) stargzCheck { } func mustSameEntry(files ...string) stargzCheck { - return stargzCheckFn(func(t *testing.T, r *Reader) { + return stargzCheckFn(func(t TestingT, r *Reader) { var first *TOCEntry for _, f := range files { if first == nil { @@ -2038,7 +2072,7 @@ func (f tarEntryFunc) appendTar(tw *tar.Writer, prefix string, format tar.Format return f(tw, prefix, format) } -func buildTar(t *testing.T, ents []tarEntry, prefix string, opts ...interface{}) *io.SectionReader { +func buildTar(t TestingT, ents []tarEntry, prefix string, opts ...interface{}) *io.SectionReader { format := tar.FormatUnknown for _, opt := range opts { switch v := opt.(type) { @@ -2247,7 +2281,7 @@ func noPrefetchLandmark() tarEntry { }) } -func regDigest(t *testing.T, name string, contentStr string, digestMap map[string]digest.Digest) tarEntry { +func regDigest(t TestingT, name string, contentStr string, digestMap map[string]digest.Digest) tarEntry { if digestMap == nil { t.Fatalf("digest map mustn't be nil") } @@ -2317,7 +2351,7 @@ func (f fileInfoOnlyMode) ModTime() time.Time { return time.Now() } func (f fileInfoOnlyMode) IsDir() bool { return os.FileMode(f).IsDir() } func (f fileInfoOnlyMode) Sys() interface{} { return nil } -func CheckGzipHasStreams(t *testing.T, b []byte, streams []int64) { +func CheckGzipHasStreams(t TestingT, b []byte, streams []int64) { if len(streams) == 0 { return // nop } @@ -2346,8 +2380,8 @@ func 
CheckGzipHasStreams(t *testing.T, b []byte, streams []int64) { t.Fatalf("countStreams(gzip), Copy: %v", err) } var extra string - if len(zr.Header.Extra) > 0 { - extra = fmt.Sprintf("; extra=%q", zr.Header.Extra) + if len(zr.Extra) > 0 { + extra = fmt.Sprintf("; extra=%q", zr.Extra) } t.Logf(" [%d] at %d in stargz, uncompressed length %d%s", numStreams, zoff, n, extra) delete(wants, int64(zoff)) @@ -2355,7 +2389,7 @@ func CheckGzipHasStreams(t *testing.T, b []byte, streams []int64) { } } -func GzipDiffIDOf(t *testing.T, b []byte) string { +func GzipDiffIDOf(t TestingT, b []byte) string { h := sha256.New() zr, err := gzip.NewReader(bytes.NewReader(b)) if err != nil { diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/config.go b/vendor/github.com/google/go-containerregistry/pkg/v1/config.go index 960c93b5f4..b62d848269 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/config.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/config.go @@ -27,10 +27,11 @@ import ( // docker_version and os.version are not part of the spec but included // for backwards compatibility. type ConfigFile struct { - Architecture string `json:"architecture"` - Author string `json:"author,omitempty"` - Container string `json:"container,omitempty"` - Created Time `json:"created,omitempty"` + Architecture string `json:"architecture"` + Author string `json:"author,omitempty"` + Container string `json:"container,omitempty"` + Created Time `json:"created,omitempty"` + // Deprecated: This field is deprecated and will be removed in the next release. DockerVersion string `json:"docker_version,omitempty"` History []History `json:"history,omitempty"` OS string `json:"os"` diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/mutate.go b/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/mutate.go index c044796004..409877bce0 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/mutate.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/mutate.go @@ -514,7 +514,7 @@ func Canonical(img v1.Image) (v1.Image, error) { cfg.Container = "" cfg.Config.Hostname = "" - cfg.DockerVersion = "" + cfg.DockerVersion = "" //nolint:staticcheck // Field will be removed in next release return ConfigFile(img, cfg) } diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/options.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/options.go index 99a2bb2eb2..15b7da1e41 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/options.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/options.go @@ -162,9 +162,14 @@ func makeOptions(opts ...Option) (*options, error) { o.transport = transport.NewLogger(o.transport) } - // Wrap the transport in something that can retry network flakes. - o.transport = transport.NewRetry(o.transport, transport.WithRetryPredicate(defaultRetryPredicate), transport.WithRetryStatusCodes(o.retryStatusCodes...)) + // Use the customized retry predicate if provided, and fall back to the default if not. + predicate := o.retryPredicate + if predicate == nil { + predicate = defaultRetryPredicate + } + // Wrap the transport in something that can retry network flakes. + o.transport = transport.NewRetry(o.transport, transport.WithRetryBackoff(o.retryBackoff), transport.WithRetryPredicate(predicate), transport.WithRetryStatusCodes(o.retryStatusCodes...)) // Wrap this last to prevent transport.New from double-wrapping.
if o.userAgent != "" { o.transport = transport.NewUserAgent(o.transport, o.userAgent) diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/error.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/error.go index 482a4adee7..d38e676249 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/error.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/error.go @@ -25,7 +25,7 @@ import ( ) // Error implements error to support the following error specification: -// https://github.com/docker/distribution/blob/master/docs/spec/api.md#errors +// https://github.com/distribution/distribution/blob/aac2f6c8b7c5a6c60190848bab5cbeed2b5ba0a9/docs/spec/api.md#errors type Error struct { Errors []Diagnostic `json:"errors,omitempty"` // The http status code returned. @@ -111,7 +111,7 @@ func (d Diagnostic) String() string { type ErrorCode string // The set of error conditions a registry may return: -// https://github.com/docker/distribution/blob/master/docs/spec/api.md#errors-2 +// https://github.com/distribution/distribution/blob/aac2f6c8b7c5a6c60190848bab5cbeed2b5ba0a9/docs/spec/api.md#errors-2 const ( BlobUnknownErrorCode ErrorCode = "BLOB_UNKNOWN" BlobUploadInvalidErrorCode ErrorCode = "BLOB_UPLOAD_INVALID" @@ -170,7 +170,7 @@ func CheckError(resp *http.Response, codes ...int) error { } func makeError(resp *http.Response, body []byte) *Error { - // https://github.com/docker/distribution/blob/master/docs/spec/api.md#errors + // https://github.com/distribution/distribution/blob/aac2f6c8b7c5a6c60190848bab5cbeed2b5ba0a9/docs/spec/api.md#errors structuredError := &Error{} // This can fail if e.g. the response body is not valid JSON. That's fine, diff --git a/vendor/github.com/klauspost/compress/flate/deflate.go b/vendor/github.com/klauspost/compress/flate/deflate.go index af53fb860c..4e92f5998a 100644 --- a/vendor/github.com/klauspost/compress/flate/deflate.go +++ b/vendor/github.com/klauspost/compress/flate/deflate.go @@ -6,11 +6,12 @@ package flate import ( - "encoding/binary" "errors" "fmt" "io" "math" + + "github.com/klauspost/compress/internal/le" ) const ( @@ -234,12 +235,9 @@ func (d *compressor) fillWindow(b []byte) { // Calculate 256 hashes at the time (more L1 cache hits) loops := (n + 256 - minMatchLength) / 256 - for j := 0; j < loops; j++ { + for j := range loops { startindex := j * 256 - end := startindex + 256 + minMatchLength - 1 - if end > n { - end = n - } + end := min(startindex+256+minMatchLength-1, n) tocheck := d.window[startindex:end] dstSize := len(tocheck) - minMatchLength + 1 @@ -269,18 +267,12 @@ func (d *compressor) fillWindow(b []byte) { // We only look at chainCount possibilities before giving up. // pos = s.index, prevHead = s.chainHead-s.hashOffset, prevLength=minMatchLength-1, lookahead func (d *compressor) findMatch(pos int, prevHead int, lookahead int) (length, offset int, ok bool) { - minMatchLook := maxMatchLength - if lookahead < minMatchLook { - minMatchLook = lookahead - } + minMatchLook := min(lookahead, maxMatchLength) win := d.window[0 : pos+minMatchLook] // We quit when we get a match that's at least nice long - nice := len(win) - pos - if d.nice < nice { - nice = d.nice - } + nice := min(d.nice, len(win)-pos) // If we've got a match that's good enough, only look in 1/4 the chain. 
tries := d.chain @@ -288,10 +280,7 @@ func (d *compressor) findMatch(pos int, prevHead int, lookahead int) (length, of wEnd := win[pos+length] wPos := win[pos:] - minIndex := pos - windowSize - if minIndex < 0 { - minIndex = 0 - } + minIndex := max(pos-windowSize, 0) offset = 0 if d.chain < 100 { @@ -374,7 +363,7 @@ func (d *compressor) writeStoredBlock(buf []byte) error { // of the supplied slice. // The caller must ensure that len(b) >= 4. func hash4(b []byte) uint32 { - return hash4u(binary.LittleEndian.Uint32(b), hashBits) + return hash4u(le.Load32(b, 0), hashBits) } // hash4 returns the hash of u to fit in a hash table with h bits. @@ -389,7 +378,7 @@ func bulkHash4(b []byte, dst []uint32) { if len(b) < 4 { return } - hb := binary.LittleEndian.Uint32(b) + hb := le.Load32(b, 0) dst[0] = hash4u(hb, hashBits) end := len(b) - 4 + 1 @@ -480,10 +469,7 @@ func (d *compressor) deflateLazy() { prevOffset := s.offset s.length = minMatchLength - 1 s.offset = 0 - minIndex := s.index - windowSize - if minIndex < 0 { - minIndex = 0 - } + minIndex := max(s.index-windowSize, 0) if s.chainHead-s.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy { if newLength, newOffset, ok := d.findMatch(s.index, s.chainHead-s.hashOffset, lookahead); ok { @@ -503,10 +489,7 @@ func (d *compressor) deflateLazy() { if prevLength < maxMatchLength-checkOff { prevIndex := s.index - 1 if prevIndex+prevLength < s.maxInsertIndex { - end := lookahead - if lookahead > maxMatchLength+checkOff { - end = maxMatchLength + checkOff - } + end := min(lookahead, maxMatchLength+checkOff) end += prevIndex // Hash at match end. @@ -603,15 +586,9 @@ func (d *compressor) deflateLazy() { // table. newIndex := s.index + prevLength - 1 // Calculate missing hashes - end := newIndex - if end > s.maxInsertIndex { - end = s.maxInsertIndex - } + end := min(newIndex, s.maxInsertIndex) end += minMatchLength - 1 - startindex := s.index + 1 - if startindex > s.maxInsertIndex { - startindex = s.maxInsertIndex - } + startindex := min(s.index+1, s.maxInsertIndex) tocheck := d.window[startindex:end] dstSize := len(tocheck) - minMatchLength + 1 if dstSize > 0 { diff --git a/vendor/github.com/klauspost/compress/flate/dict_decoder.go b/vendor/github.com/klauspost/compress/flate/dict_decoder.go index bb36351a5a..cb855abc4b 100644 --- a/vendor/github.com/klauspost/compress/flate/dict_decoder.go +++ b/vendor/github.com/klauspost/compress/flate/dict_decoder.go @@ -104,10 +104,7 @@ func (dd *dictDecoder) writeCopy(dist, length int) int { dstBase := dd.wrPos dstPos := dstBase srcPos := dstPos - dist - endPos := dstPos + length - if endPos > len(dd.hist) { - endPos = len(dd.hist) - } + endPos := min(dstPos+length, len(dd.hist)) // Copy non-overlapping section after destination position. 
// diff --git a/vendor/github.com/klauspost/compress/flate/fast_encoder.go b/vendor/github.com/klauspost/compress/flate/fast_encoder.go index 0e8b1630c0..791c9dcbfd 100644 --- a/vendor/github.com/klauspost/compress/flate/fast_encoder.go +++ b/vendor/github.com/klauspost/compress/flate/fast_encoder.go @@ -7,7 +7,6 @@ package flate import ( "fmt" - "math/bits" "github.com/klauspost/compress/internal/le" ) @@ -151,29 +150,9 @@ func (e *fastGen) matchlen(s, t int, src []byte) int32 { panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")")) } } - s1 := min(s+maxMatchLength-4, len(src)) - left := s1 - s - n := int32(0) - for left >= 8 { - diff := le.Load64(src, s) ^ le.Load64(src, t) - if diff != 0 { - return n + int32(bits.TrailingZeros64(diff)>>3) - } - s += 8 - t += 8 - n += 8 - left -= 8 - } - - a := src[s:s1] + a := src[s:min(s+maxMatchLength-4, len(src))] b := src[t:] - for i := range a { - if a[i] != b[i] { - break - } - n++ - } - return n + return int32(matchLen(a, b)) } // matchlenLong will return the match length between offsets and t in src. @@ -193,29 +172,7 @@ func (e *fastGen) matchlenLong(s, t int, src []byte) int32 { panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")")) } } - // Extend the match to be as long as possible. - left := len(src) - s - n := int32(0) - for left >= 8 { - diff := le.Load64(src, s) ^ le.Load64(src, t) - if diff != 0 { - return n + int32(bits.TrailingZeros64(diff)>>3) - } - s += 8 - t += 8 - n += 8 - left -= 8 - } - - a := src[s:] - b := src[t:] - for i := range a { - if a[i] != b[i] { - break - } - n++ - } - return n + return int32(matchLen(src[s:], src[t:])) } // Reset the encoding table. diff --git a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go index afdc8c053a..03a1796979 100644 --- a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go +++ b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go @@ -211,7 +211,9 @@ func (w *huffmanBitWriter) flush() { n++ } w.bits = 0 - w.write(w.bytes[:n]) + if n > 0 { + w.write(w.bytes[:n]) + } w.nbytes = 0 } @@ -303,10 +305,7 @@ func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int, litE w.codegenFreq[size]++ count-- for count >= 3 { - n := 6 - if n > count { - n = count - } + n := min(6, count) codegen[outIndex] = 16 outIndex++ codegen[outIndex] = uint8(n - 3) @@ -316,10 +315,7 @@ func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int, litE } } else { for count >= 11 { - n := 138 - if n > count { - n = count - } + n := min(138, count) codegen[outIndex] = 18 outIndex++ codegen[outIndex] = uint8(n - 11) @@ -438,8 +434,8 @@ func (w *huffmanBitWriter) writeOutBits() { w.nbits -= 48 n := w.nbytes - // We over-write, but faster... - le.Store64(w.bytes[n:], bits) + // We overwrite, but faster... 
+ le.Store64(w.bytes[:], n, bits) n += 6 if n >= bufferFlushSize { @@ -472,7 +468,7 @@ func (w *huffmanBitWriter) writeDynamicHeader(numLiterals int, numOffsets int, n w.writeBits(int32(numOffsets-1), 5) w.writeBits(int32(numCodegens-4), 4) - for i := 0; i < numCodegens; i++ { + for i := range numCodegens { value := uint(w.codegenEncoding.codes[codegenOrder[i]].len()) w.writeBits(int32(value), 3) } @@ -650,7 +646,7 @@ func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []b w.lastHeader = 0 } - numLiterals, numOffsets := w.indexTokens(tokens, !sync) + numLiterals, numOffsets := w.indexTokens(tokens, fillReuse && !sync) extraBits := 0 ssize, storable := w.storedSize(input) @@ -855,8 +851,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) bits |= c.code64() << (nbits & 63) nbits += c.len() if nbits >= 48 { - le.Store64(w.bytes[nbytes:], bits) - //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits + le.Store64(w.bytes[:], nbytes, bits) bits >>= 48 nbits -= 48 nbytes += 6 @@ -883,8 +878,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) bits |= c.code64() << (nbits & 63) nbits += c.len() if nbits >= 48 { - le.Store64(w.bytes[nbytes:], bits) - //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits + le.Store64(w.bytes[:], nbytes, bits) bits >>= 48 nbits -= 48 nbytes += 6 @@ -906,8 +900,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) bits |= uint64(extraLength) << (nbits & 63) nbits += extraLengthBits if nbits >= 48 { - le.Store64(w.bytes[nbytes:], bits) - //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits + le.Store64(w.bytes[:], nbytes, bits) bits >>= 48 nbits -= 48 nbytes += 6 @@ -932,8 +925,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) bits |= c.code64() << (nbits & 63) nbits += c.len() if nbits >= 48 { - le.Store64(w.bytes[nbytes:], bits) - //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits + le.Store64(w.bytes[:], nbytes, bits) bits >>= 48 nbits -= 48 nbytes += 6 @@ -954,8 +946,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) bits |= uint64((offset-(offsetComb>>8))&matchOffsetOnlyMask) << (nbits & 63) nbits += uint8(offsetComb) if nbits >= 48 { - le.Store64(w.bytes[nbytes:], bits) - //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits + le.Store64(w.bytes[:], nbytes, bits) bits >>= 48 nbits -= 48 nbytes += 6 @@ -1108,7 +1099,7 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { // We must have at least 48 bits free. if nbits >= 8 { n := nbits >> 3 - le.Store64(w.bytes[nbytes:], bits) + le.Store64(w.bytes[:], nbytes, bits) bits >>= (n * 8) & 63 nbits -= n * 8 nbytes += n @@ -1137,8 +1128,7 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { // Remaining... 
for _, t := range input { if nbits >= 48 { - le.Store64(w.bytes[nbytes:], bits) - //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits + le.Store64(w.bytes[:], nbytes, bits) bits >>= 48 nbits -= 48 nbytes += 6 diff --git a/vendor/github.com/klauspost/compress/flate/huffman_code.go b/vendor/github.com/klauspost/compress/flate/huffman_code.go index be7b58b473..5f901bd0fe 100644 --- a/vendor/github.com/klauspost/compress/flate/huffman_code.go +++ b/vendor/github.com/klauspost/compress/flate/huffman_code.go @@ -91,7 +91,7 @@ func generateFixedLiteralEncoding() *huffmanEncoder { h := newHuffmanEncoder(literalCount) codes := h.codes var ch uint16 - for ch = 0; ch < literalCount; ch++ { + for ch = range uint16(literalCount) { var bits uint16 var size uint8 switch { diff --git a/vendor/github.com/klauspost/compress/flate/inflate.go b/vendor/github.com/klauspost/compress/flate/inflate.go index 0d7b437f1c..6e90126db0 100644 --- a/vendor/github.com/klauspost/compress/flate/inflate.go +++ b/vendor/github.com/klauspost/compress/flate/inflate.go @@ -485,7 +485,7 @@ func (f *decompressor) readHuffman() error { f.nb -= 5 + 5 + 4 // (HCLEN+4)*3 bits: code lengths in the magic codeOrder order. - for i := 0; i < nclen; i++ { + for i := range nclen { for f.nb < 3 { if err := f.moreBits(); err != nil { return err @@ -776,7 +776,7 @@ func fixedHuffmanDecoderInit() { fixedOnce.Do(func() { // These come from the RFC section 3.2.6. var bits [288]int - for i := 0; i < 144; i++ { + for i := range 144 { bits[i] = 8 } for i := 144; i < 256; i++ { diff --git a/vendor/github.com/klauspost/compress/flate/level5.go b/vendor/github.com/klauspost/compress/flate/level5.go index 6e5c21502f..a22ad7d125 100644 --- a/vendor/github.com/klauspost/compress/flate/level5.go +++ b/vendor/github.com/klauspost/compress/flate/level5.go @@ -677,10 +677,7 @@ func (e *fastEncL5Window) matchlen(s, t int32, src []byte) int32 { panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")")) } } - s1 := int(s) + maxMatchLength - 4 - if s1 > len(src) { - s1 = len(src) - } + s1 := min(int(s)+maxMatchLength-4, len(src)) // Extend the match to be as long as possible. return int32(matchLen(src[s:s1], src[t:])) diff --git a/vendor/github.com/klauspost/compress/flate/stateless.go b/vendor/github.com/klauspost/compress/flate/stateless.go index 13b9b100db..90b74f7acd 100644 --- a/vendor/github.com/klauspost/compress/flate/stateless.go +++ b/vendor/github.com/klauspost/compress/flate/stateless.go @@ -56,7 +56,7 @@ func NewStatelessWriter(dst io.Writer) io.WriteCloser { // bitWriterPool contains bit writers that can be reused. var bitWriterPool = sync.Pool{ - New: func() interface{} { + New: func() any { return newHuffmanBitWriter(nil) }, } @@ -184,7 +184,7 @@ func statelessEnc(dst *tokens, src []byte, startAt int16) { // Index until startAt if startAt > 0 { cv := load3232(src, 0) - for i := int16(0); i < startAt; i++ { + for i := range startAt { table[hashSL(cv)] = tableEntry{offset: i} cv = (cv >> 8) | (uint32(src[i+4]) << 24) } diff --git a/vendor/github.com/klauspost/compress/fse/bitwriter.go b/vendor/github.com/klauspost/compress/fse/bitwriter.go index e82fa3bb7b..d58b3fe423 100644 --- a/vendor/github.com/klauspost/compress/fse/bitwriter.go +++ b/vendor/github.com/klauspost/compress/fse/bitwriter.go @@ -143,7 +143,7 @@ func (b *bitWriter) flush32() { // flushAlign will flush remaining full bytes and align to next byte boundary. 
func (b *bitWriter) flushAlign() { nbBytes := (b.nBits + 7) >> 3 - for i := uint8(0); i < nbBytes; i++ { + for i := range nbBytes { b.out = append(b.out, byte(b.bitContainer>>(i*8))) } b.nBits = 0 diff --git a/vendor/github.com/klauspost/compress/fse/compress.go b/vendor/github.com/klauspost/compress/fse/compress.go index 074018d8f9..8c8baa4fc2 100644 --- a/vendor/github.com/klauspost/compress/fse/compress.go +++ b/vendor/github.com/klauspost/compress/fse/compress.go @@ -396,7 +396,7 @@ func (s *Scratch) buildCTable() error { if v > largeLimit { s.zeroBits = true } - for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { + for range v { tableSymbol[position] = symbol position = (position + step) & tableMask for position > highThreshold { diff --git a/vendor/github.com/klauspost/compress/huff0/bitwriter.go b/vendor/github.com/klauspost/compress/huff0/bitwriter.go index 0ebc9aaac7..41db94cded 100644 --- a/vendor/github.com/klauspost/compress/huff0/bitwriter.go +++ b/vendor/github.com/klauspost/compress/huff0/bitwriter.go @@ -85,7 +85,7 @@ func (b *bitWriter) flush32() { // flushAlign will flush remaining full bytes and align to next byte boundary. func (b *bitWriter) flushAlign() { nbBytes := (b.nBits + 7) >> 3 - for i := uint8(0); i < nbBytes; i++ { + for i := range nbBytes { b.out = append(b.out, byte(b.bitContainer>>(i*8))) } b.nBits = 0 diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go index 84aa3d12f0..a97cf1b5d3 100644 --- a/vendor/github.com/klauspost/compress/huff0/compress.go +++ b/vendor/github.com/klauspost/compress/huff0/compress.go @@ -276,7 +276,7 @@ func (s *Scratch) compress4X(src []byte) ([]byte, error) { offsetIdx := len(s.Out) s.Out = append(s.Out, sixZeros[:]...) 
- for i := 0; i < 4; i++ { + for i := range 4 { toDo := src if len(toDo) > segmentSize { toDo = toDo[:segmentSize] @@ -312,7 +312,7 @@ func (s *Scratch) compress4Xp(src []byte) ([]byte, error) { segmentSize := (len(src) + 3) / 4 var wg sync.WaitGroup wg.Add(4) - for i := 0; i < 4; i++ { + for i := range 4 { toDo := src if len(toDo) > segmentSize { toDo = toDo[:segmentSize] @@ -326,7 +326,7 @@ func (s *Scratch) compress4Xp(src []byte) ([]byte, error) { }(i) } wg.Wait() - for i := 0; i < 4; i++ { + for i := range 4 { o := s.tmpOut[i] if len(o) > math.MaxUint16 { // We cannot store the size in the jump table diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go index 0f56b02d74..7d0efa8818 100644 --- a/vendor/github.com/klauspost/compress/huff0/decompress.go +++ b/vendor/github.com/klauspost/compress/huff0/decompress.go @@ -626,7 +626,7 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { var br [4]bitReaderBytes start := 6 - for i := 0; i < 3; i++ { + for i := range 3 { length := int(src[i*2]) | (int(src[i*2+1]) << 8) if start+length >= len(src) { return nil, errors.New("truncated input (or invalid offset)") @@ -798,10 +798,7 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { remainBytes := dstEvery - (decoded / 4) for i := range br { offset := dstEvery * i - endsAt := offset + remainBytes - if endsAt > len(out) { - endsAt = len(out) - } + endsAt := min(offset+remainBytes, len(out)) br := &br[i] bitsLeft := br.remaining() for bitsLeft > 0 { @@ -864,7 +861,7 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { var br [4]bitReaderBytes start := 6 - for i := 0; i < 3; i++ { + for i := range 3 { length := int(src[i*2]) | (int(src[i*2+1]) << 8) if start+length >= len(src) { return nil, errors.New("truncated input (or invalid offset)") @@ -1035,10 +1032,7 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { remainBytes := dstEvery - (decoded / 4) for i := range br { offset := dstEvery * i - endsAt := offset + remainBytes - if endsAt > len(out) { - endsAt = len(out) - } + endsAt := min(offset+remainBytes, len(out)) br := &br[i] bitsLeft := br.remaining() for bitsLeft > 0 { diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go index ba7e8e6b02..99ddd4af97 100644 --- a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go +++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go @@ -58,7 +58,7 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { var br [4]bitReaderShifted // Decode "jump table" start := 6 - for i := 0; i < 3; i++ { + for i := range 3 { length := int(src[i*2]) | (int(src[i*2+1]) << 8) if start+length >= len(src) { return nil, errors.New("truncated input (or invalid offset)") @@ -109,10 +109,7 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { remainBytes := dstEvery - (decoded / 4) for i := range br { offset := dstEvery * i - endsAt := offset + remainBytes - if endsAt > len(out) { - endsAt = len(out) - } + endsAt := min(offset+remainBytes, len(out)) br := &br[i] bitsLeft := br.remaining() for bitsLeft > 0 { diff --git a/vendor/github.com/klauspost/compress/huff0/huff0.go b/vendor/github.com/klauspost/compress/huff0/huff0.go index 77ecd68e0a..67d9e05b6c 100644 --- 
a/vendor/github.com/klauspost/compress/huff0/huff0.go +++ b/vendor/github.com/klauspost/compress/huff0/huff0.go @@ -201,7 +201,7 @@ func (c cTable) write(s *Scratch) error { for i := range hist[:16] { hist[i] = 0 } - for n := uint8(0); n < maxSymbolValue; n++ { + for n := range maxSymbolValue { v := bitsToWeight[c[n].nBits] & 15 huffWeight[n] = v hist[v]++ @@ -271,7 +271,7 @@ func (c cTable) estTableSize(s *Scratch) (sz int, err error) { for i := range hist[:16] { hist[i] = 0 } - for n := uint8(0); n < maxSymbolValue; n++ { + for n := range maxSymbolValue { v := bitsToWeight[c[n].nBits] & 15 huffWeight[n] = v hist[v]++ diff --git a/vendor/github.com/klauspost/compress/internal/le/unsafe_disabled.go b/vendor/github.com/klauspost/compress/internal/le/unsafe_disabled.go index 0cfb5c0e27..4f2a0d8c58 100644 --- a/vendor/github.com/klauspost/compress/internal/le/unsafe_disabled.go +++ b/vendor/github.com/klauspost/compress/internal/le/unsafe_disabled.go @@ -37,6 +37,6 @@ func Store32(b []byte, v uint32) { } // Store64 will store v at b. -func Store64(b []byte, v uint64) { - binary.LittleEndian.PutUint64(b, v) +func Store64[I Indexer](b []byte, i I, v uint64) { + binary.LittleEndian.PutUint64(b[i:], v) } diff --git a/vendor/github.com/klauspost/compress/internal/le/unsafe_enabled.go b/vendor/github.com/klauspost/compress/internal/le/unsafe_enabled.go index ada45cd909..218a38bc4a 100644 --- a/vendor/github.com/klauspost/compress/internal/le/unsafe_enabled.go +++ b/vendor/github.com/klauspost/compress/internal/le/unsafe_enabled.go @@ -38,18 +38,15 @@ func Load64[I Indexer](b []byte, i I) uint64 { // Store16 will store v at b. func Store16(b []byte, v uint16) { - //binary.LittleEndian.PutUint16(b, v) *(*uint16)(unsafe.Pointer(unsafe.SliceData(b))) = v } // Store32 will store v at b. func Store32(b []byte, v uint32) { - //binary.LittleEndian.PutUint32(b, v) *(*uint32)(unsafe.Pointer(unsafe.SliceData(b))) = v } -// Store64 will store v at b. -func Store64(b []byte, v uint64) { - //binary.LittleEndian.PutUint64(b, v) - *(*uint64)(unsafe.Pointer(unsafe.SliceData(b))) = v +// Store64 will store v at b[i:]. +func Store64[I Indexer](b []byte, i I, v uint64) { + *(*uint64)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i)) = v } diff --git a/vendor/github.com/klauspost/compress/internal/snapref/decode.go b/vendor/github.com/klauspost/compress/internal/snapref/decode.go index 40796a49d6..a2c82fcd22 100644 --- a/vendor/github.com/klauspost/compress/internal/snapref/decode.go +++ b/vendor/github.com/klauspost/compress/internal/snapref/decode.go @@ -209,7 +209,7 @@ func (r *Reader) fill() error { if !r.readFull(r.buf[:len(magicBody)], false) { return r.err } - for i := 0; i < len(magicBody); i++ { + for i := range len(magicBody) { if r.buf[i] != magicBody[i] { r.err = ErrCorrupt return r.err diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode.go b/vendor/github.com/klauspost/compress/internal/snapref/encode.go index 13c6040a5d..860a994167 100644 --- a/vendor/github.com/klauspost/compress/internal/snapref/encode.go +++ b/vendor/github.com/klauspost/compress/internal/snapref/encode.go @@ -20,8 +20,10 @@ import ( func Encode(dst, src []byte) []byte { if n := MaxEncodedLen(len(src)); n < 0 { panic(ErrTooLarge) - } else if len(dst) < n { + } else if cap(dst) < n { dst = make([]byte, n) + } else { + dst = dst[:n] } // The block starts with the varint-encoded length of the decompressed bytes. 
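The snapref/encode.go hunk above is a small behavioral fix worth noting: Encode now sizes its destination by capacity rather than length, so a caller-supplied buffer that is short but has spare capacity is resliced instead of reallocated. A minimal sketch of the same sizing pattern, using a hypothetical ensureLen helper (the name is illustrative, not part of the package):

package main

import "fmt"

// ensureLen returns b resized to n bytes, reusing b's backing array when it
// is large enough; this mirrors the cap-based check Encode now performs.
func ensureLen(b []byte, n int) []byte {
	if cap(b) < n {
		return make([]byte, n)
	}
	return b[:n]
}

func main() {
	buf := make([]byte, 0, 64) // zero length, ample capacity
	out := ensureLen(buf, 32)
	fmt.Println(len(out), cap(out)) // 32 64: no new allocation
}

Under the old len-based check, buf above would have been discarded and a fresh slice allocated on every call.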
diff --git a/vendor/github.com/klauspost/compress/zstd/bitwriter.go b/vendor/github.com/klauspost/compress/zstd/bitwriter.go index 1952f175b0..b22b297e62 100644 --- a/vendor/github.com/klauspost/compress/zstd/bitwriter.go +++ b/vendor/github.com/klauspost/compress/zstd/bitwriter.go @@ -88,7 +88,7 @@ func (b *bitWriter) flush32() { // flushAlign will flush remaining full bytes and align to next byte boundary. func (b *bitWriter) flushAlign() { nbBytes := (b.nBits + 7) >> 3 - for i := uint8(0); i < nbBytes; i++ { + for i := range nbBytes { b.out = append(b.out, byte(b.bitContainer>>(i*8))) } b.nBits = 0 diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go index 0dd742fd2a..2329e996f8 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockdec.go +++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -54,11 +54,11 @@ const ( ) var ( - huffDecoderPool = sync.Pool{New: func() interface{} { + huffDecoderPool = sync.Pool{New: func() any { return &huff0.Scratch{} }} - fseDecoderPool = sync.Pool{New: func() interface{} { + fseDecoderPool = sync.Pool{New: func() any { return &fseDecoder{} }} ) @@ -553,7 +553,7 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) { if compMode&3 != 0 { return errors.New("corrupt block: reserved bits not zero") } - for i := uint(0); i < 3; i++ { + for i := range uint(3) { mode := seqCompMode((compMode >> (6 - i*2)) & 3) if debugDecoder { println("Table", tableIndex(i), "is", mode) diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go index ea2a19376c..30df5513d5 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder.go @@ -373,11 +373,9 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { if cap(dst) == 0 && !d.o.limitToCap { // Allocate len(input) * 2 by default if nothing is provided // and we didn't get frame content size. - size := len(input) * 2 - // Cap to 1 MB. - if size > 1<<20 { - size = 1 << 20 - } + size := min( + // Cap to 1 MB. + len(input)*2, 1<<20) if uint64(size) > d.o.maxDecodedSize { size = int(d.o.maxDecodedSize) } diff --git a/vendor/github.com/klauspost/compress/zstd/dict.go b/vendor/github.com/klauspost/compress/zstd/dict.go index b7b83164bc..2ffbfdf379 100644 --- a/vendor/github.com/klauspost/compress/zstd/dict.go +++ b/vendor/github.com/klauspost/compress/zstd/dict.go @@ -194,17 +194,17 @@ func BuildDict(o BuildDictOptions) ([]byte, error) { hist := o.History contents := o.Contents debug := o.DebugOut != nil - println := func(args ...interface{}) { + println := func(args ...any) { if o.DebugOut != nil { fmt.Fprintln(o.DebugOut, args...) } } - printf := func(s string, args ...interface{}) { + printf := func(s string, args ...any) { if o.DebugOut != nil { fmt.Fprintf(o.DebugOut, s, args...) } } - print := func(args ...interface{}) { + print := func(args ...any) { if o.DebugOut != nil { fmt.Fprint(o.DebugOut, args...) 
} @@ -424,16 +424,10 @@ func BuildDict(o BuildDictOptions) ([]byte, error) { } // Literal table - avgSize := litTotal - if avgSize > huff0.BlockSizeMax/2 { - avgSize = huff0.BlockSizeMax / 2 - } + avgSize := min(litTotal, huff0.BlockSizeMax/2) huffBuff := make([]byte, 0, avgSize) // Target size - div := litTotal / avgSize - if div < 1 { - div = 1 - } + div := max(litTotal/avgSize, 1) if debug { println("Huffman weights:") } @@ -454,7 +448,7 @@ func BuildDict(o BuildDictOptions) ([]byte, error) { huffBuff = append(huffBuff, 255) } scratch := &huff0.Scratch{TableLog: 11} - for tries := 0; tries < 255; tries++ { + for tries := range 255 { scratch = &huff0.Scratch{TableLog: 11} _, _, err = huff0.Compress1X(huffBuff, scratch) if err == nil { @@ -471,7 +465,7 @@ func BuildDict(o BuildDictOptions) ([]byte, error) { // Bail out.... Just generate something huffBuff = append(huffBuff, bytes.Repeat([]byte{255}, 10000)...) - for i := 0; i < 128; i++ { + for i := range 128 { huffBuff = append(huffBuff, byte(i)) } continue diff --git a/vendor/github.com/klauspost/compress/zstd/enc_base.go b/vendor/github.com/klauspost/compress/zstd/enc_base.go index 7d250c67f5..c1192ec38f 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_base.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_base.go @@ -8,7 +8,7 @@ import ( ) const ( - dictShardBits = 6 + dictShardBits = 7 ) type fastBase struct { @@ -41,11 +41,9 @@ func (e *fastBase) AppendCRC(dst []byte) []byte { // or a window size small enough to contain the input size, if > 0. func (e *fastBase) WindowSize(size int64) int32 { if size > 0 && size < int64(e.maxMatchOff) { - b := int32(1) << uint(bits.Len(uint(size))) - // Keep minimum window. - if b < 1024 { - b = 1024 - } + b := max( + // Keep minimum window. + int32(1)<<uint(bits.Len(uint(size))), 1024) diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for offset > tMin && s > nextEmit && src[offset-1] == src[s-1] && l < maxMatchLength { s-- offset-- @@ -382,10 +377,7 @@ encodeLoop: nextEmit = s // Index skipped... - end := s - if s > sLimit+4 { - end = sLimit + 4 - } + end := min(s, sLimit+4) off := index0 + e.cur for index0 < end { cv0 := load6432(src, index0) @@ -444,10 +436,7 @@ encodeLoop: nextEmit = s // Index old s + 1 -> s - 1 or sLimit - end := s - if s > sLimit-4 { - end = sLimit - 4 - } + end := min(s, sLimit-4) off := index0 + e.cur for index0 < end { diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go index 84a79fde76..85dcd28c32 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_better.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go @@ -190,10 +190,7 @@ encodeLoop: // and have to do special offset treatment. startLimit := nextEmit + 1 - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { repIndex-- start-- @@ -252,10 +249,7 @@ encodeLoop: // and have to do special offset treatment.
startLimit := nextEmit + 1 - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { repIndex-- start-- @@ -480,10 +474,7 @@ encodeLoop: l := matched // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { s-- t-- @@ -719,10 +710,7 @@ encodeLoop: // and have to do special offset treatment. startLimit := nextEmit + 1 - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { repIndex-- start-- @@ -783,10 +771,7 @@ encodeLoop: // and have to do special offset treatment. startLimit := nextEmit + 1 - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { repIndex-- start-- @@ -1005,10 +990,7 @@ encodeLoop: l := matched // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { s-- t-- diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go index d36be7bd8c..cf8cad00dc 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go @@ -13,7 +13,7 @@ const ( dFastLongLen = 8 // Bytes used for table hash dLongTableShardCnt = 1 << (dFastLongTableBits - dictShardBits) // Number of shards in the table - dLongTableShardSize = dFastLongTableSize / tableShardCnt // Size of an individual shard + dLongTableShardSize = dFastLongTableSize / dLongTableShardCnt // Size of an individual shard dFastShortTableBits = tableBits // Bits used in the short match table dFastShortTableSize = 1 << dFastShortTableBits // Size of the table @@ -149,10 +149,7 @@ encodeLoop: // and have to do special offset treatment. startLimit := nextEmit + 1 - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { repIndex-- start-- @@ -266,10 +263,7 @@ encodeLoop: l := e.matchlen(s+4, t+4, src) + 4 // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { s-- t-- @@ -462,10 +456,7 @@ encodeLoop: // and have to do special offset treatment. startLimit := nextEmit + 1 - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] { repIndex-- start-- @@ -576,10 +567,7 @@ encodeLoop: l := int32(matchLen(src[s+4:], src[t+4:])) + 4 // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for t > tMin && s > nextEmit && src[t-1] == src[s-1] { s-- t-- @@ -809,10 +797,7 @@ encodeLoop: // and have to do special offset treatment. 
startLimit := nextEmit + 1 - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { repIndex-- start-- @@ -927,10 +912,7 @@ encodeLoop: l := e.matchlen(s+4, t+4, src) + 4 // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { s-- t-- diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go index f45a3da7da..9180a3a582 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_fast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go @@ -143,10 +143,7 @@ encodeLoop: // and have to do special offset treatment. startLimit := nextEmit + 1 - sMin := s - e.maxMatchOff - if sMin < 0 { - sMin = 0 - } + sMin := max(s-e.maxMatchOff, 0) for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch { repIndex-- start-- @@ -223,10 +220,7 @@ encodeLoop: l := e.matchlen(s+4, t+4, src) + 4 // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { s-- t-- @@ -387,10 +381,7 @@ encodeLoop: // and have to do special offset treatment. startLimit := nextEmit + 1 - sMin := s - e.maxMatchOff - if sMin < 0 { - sMin = 0 - } + sMin := max(s-e.maxMatchOff, 0) for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] { repIndex-- start-- @@ -469,10 +460,7 @@ encodeLoop: l := e.matchlen(s+4, t+4, src) + 4 // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for t > tMin && s > nextEmit && src[t-1] == src[s-1] { s-- t-- @@ -655,10 +643,7 @@ encodeLoop: // and have to do special offset treatment. startLimit := nextEmit + 1 - sMin := s - e.maxMatchOff - if sMin < 0 { - sMin = 0 - } + sMin := max(s-e.maxMatchOff, 0) for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch { repIndex-- start-- @@ -735,10 +720,7 @@ encodeLoop: l := e.matchlen(s+4, t+4, src) + 4 // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { s-- t-- diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go index e47af66e7c..d88f067e5c 100644 --- a/vendor/github.com/klauspost/compress/zstd/framedec.go +++ b/vendor/github.com/klauspost/compress/zstd/framedec.go @@ -238,10 +238,7 @@ func (d *frameDec) reset(br byteBuffer) error { if d.WindowSize == 0 && d.SingleSegment { // We may not need window in this case. 
- d.WindowSize = d.FrameContentSize - if d.WindowSize < MinWindowSize { - d.WindowSize = MinWindowSize - } + d.WindowSize = max(d.FrameContentSize, MinWindowSize) if d.WindowSize > d.o.maxDecodedSize { if debugDecoder { printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize) diff --git a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go index ab26326a8f..3a0f4e7fbe 100644 --- a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go +++ b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go @@ -149,7 +149,7 @@ func (s *fseEncoder) buildCTable() error { if v > largeLimit { s.zeroBits = true } - for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { + for range v { tableSymbol[position] = symbol position = (position + step) & tableMask for position > highThreshold { diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go index 9a7de82f9e..0bfb0e43c7 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec.go +++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go @@ -231,10 +231,7 @@ func (s *sequenceDecs) decodeSync(hist []byte) error { llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state out := s.out - maxBlockSize := maxCompressedBlockSize - if s.windowSize < maxBlockSize { - maxBlockSize = s.windowSize - } + maxBlockSize := min(s.windowSize, maxCompressedBlockSize) if debugDecoder { println("decodeSync: decoding", seqs, "sequences", br.remain(), "bits remain on stream") diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go index c59f17e07a..1f8c3cec28 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go @@ -79,10 +79,7 @@ func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { br := s.br - maxBlockSize := maxCompressedBlockSize - if s.windowSize < maxBlockSize { - maxBlockSize = s.windowSize - } + maxBlockSize := min(s.windowSize, maxCompressedBlockSize) ctx := decodeSyncAsmContext{ llTable: s.litLengths.fse.dt[:maxTablesize], @@ -237,10 +234,7 @@ func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmC func (s *sequenceDecs) decode(seqs []seqVals) error { br := s.br - maxBlockSize := maxCompressedBlockSize - if s.windowSize < maxBlockSize { - maxBlockSize = s.windowSize - } + maxBlockSize := min(s.windowSize, maxCompressedBlockSize) ctx := decodeAsmContext{ llTable: s.litLengths.fse.dt[:maxTablesize], diff --git a/vendor/github.com/klauspost/compress/zstd/simple_go124.go b/vendor/github.com/klauspost/compress/zstd/simple_go124.go new file mode 100644 index 0000000000..2efc0497bf --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/simple_go124.go @@ -0,0 +1,56 @@ +// Copyright 2025+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. + +//go:build go1.24 + +package zstd + +import ( + "errors" + "runtime" + "sync" + "weak" +) + +var weakMu sync.Mutex +var simpleEnc weak.Pointer[Encoder] +var simpleDec weak.Pointer[Decoder] + +// EncodeTo appends the encoded data from src to dst. 
+func EncodeTo(dst []byte, src []byte) []byte { + weakMu.Lock() + enc := simpleEnc.Value() + if enc == nil { + var err error + enc, err = NewWriter(nil, WithEncoderConcurrency(runtime.NumCPU()), WithWindowSize(1<<20), WithLowerEncoderMem(true), WithZeroFrames(true)) + if err != nil { + panic("failed to create simple encoder: " + err.Error()) + } + simpleEnc = weak.Make(enc) + } + weakMu.Unlock() + + return enc.EncodeAll(src, dst) +} + +// DecodeTo appends the decoded data from src to dst. +// The maximum decoded size is 1GiB, +// not including what may already be in dst. +func DecodeTo(dst []byte, src []byte) ([]byte, error) { + weakMu.Lock() + dec := simpleDec.Value() + if dec == nil { + var err error + dec, err = NewReader(nil, WithDecoderConcurrency(runtime.NumCPU()), WithDecoderLowmem(true), WithDecoderMaxMemory(1<<30)) + if err != nil { + weakMu.Unlock() + return nil, errors.New("failed to create simple decoder: " + err.Error()) + } + runtime.SetFinalizer(dec, func(d *Decoder) { + d.Close() + }) + simpleDec = weak.Make(dec) + } + weakMu.Unlock() + return dec.DecodeAll(src, dst) +} diff --git a/vendor/github.com/klauspost/compress/zstd/snappy.go b/vendor/github.com/klauspost/compress/zstd/snappy.go index a17381b8f8..336c288930 100644 --- a/vendor/github.com/klauspost/compress/zstd/snappy.go +++ b/vendor/github.com/klauspost/compress/zstd/snappy.go @@ -257,7 +257,7 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) { if !r.readFull(r.buf[:len(snappyMagicBody)], false) { return written, r.err } - for i := 0; i < len(snappyMagicBody); i++ { + for i := range len(snappyMagicBody) { if r.buf[i] != snappyMagicBody[i] { println("r.buf[i] != snappyMagicBody[i]", r.buf[i], snappyMagicBody[i], i) r.err = ErrSnappyCorrupt diff --git a/vendor/github.com/klauspost/compress/zstd/zip.go b/vendor/github.com/klauspost/compress/zstd/zip.go index 29c15c8c4e..3198d71892 100644 --- a/vendor/github.com/klauspost/compress/zstd/zip.go +++ b/vendor/github.com/klauspost/compress/zstd/zip.go @@ -19,7 +19,7 @@ const ZipMethodWinZip = 93 const ZipMethodPKWare = 20 // zipReaderPool is the default reader pool. -var zipReaderPool = sync.Pool{New: func() interface{} { +var zipReaderPool = sync.Pool{New: func() any { z, err := NewReader(nil, WithDecoderLowmem(true), WithDecoderMaxWindow(128<<20), WithDecoderConcurrency(1)) if err != nil { panic(err) diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go index 6252b46ae6..1a869710d2 100644 --- a/vendor/github.com/klauspost/compress/zstd/zstd.go +++ b/vendor/github.com/klauspost/compress/zstd/zstd.go @@ -98,13 +98,13 @@ var ( ErrDecoderNilInput = errors.New("nil input provided as reader") ) -func println(a ...interface{}) { +func println(a ...any) { if debug || debugDecoder || debugEncoder { log.Println(a...) } } -func printf(format string, a ...interface{}) { +func printf(format string, a ...any) { if debug || debugDecoder || debugEncoder { log.Printf(format, a...) 
} diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/common.go b/vendor/github.com/vbatts/tar-split/archive/tar/common.go index dee9e47e4a..e687a08c96 100644 --- a/vendor/github.com/vbatts/tar-split/archive/tar/common.go +++ b/vendor/github.com/vbatts/tar-split/archive/tar/common.go @@ -34,6 +34,7 @@ var ( errMissData = errors.New("archive/tar: sparse file references non-existent data") errUnrefData = errors.New("archive/tar: sparse file contains unreferenced data") errWriteHole = errors.New("archive/tar: write non-NUL byte in sparse hole") + errSparseTooLong = errors.New("archive/tar: sparse map too long") ) type headerError []string diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/reader.go b/vendor/github.com/vbatts/tar-split/archive/tar/reader.go index 248a7ccb15..a645c41605 100644 --- a/vendor/github.com/vbatts/tar-split/archive/tar/reader.go +++ b/vendor/github.com/vbatts/tar-split/archive/tar/reader.go @@ -581,12 +581,17 @@ func readGNUSparseMap1x0(r io.Reader) (sparseDatas, error) { cntNewline int64 buf bytes.Buffer blk block + totalSize int ) // feedTokens copies data in blocks from r into buf until there are // at least cnt newlines in buf. It will not read more blocks than needed. feedTokens := func(n int64) error { for cntNewline < n { + totalSize += len(blk) + if totalSize > maxSpecialFileSize { + return errSparseTooLong + } if _, err := mustReadFull(r, blk[:]); err != nil { return err } @@ -619,8 +624,8 @@ func readGNUSparseMap1x0(r io.Reader) (sparseDatas, error) { } // Parse for all member entries. - // numEntries is trusted after this since a potential attacker must have - // committed resources proportional to what this library used. + // numEntries is trusted after this since feedTokens limits the number of + // tokens based on maxSpecialFileSize. if err := feedTokens(2 * numEntries); err != nil { return nil, err } diff --git a/vendor/golang.org/x/mod/semver/semver.go b/vendor/golang.org/x/mod/semver/semver.go index 628f8fd687..824b282c83 100644 --- a/vendor/golang.org/x/mod/semver/semver.go +++ b/vendor/golang.org/x/mod/semver/semver.go @@ -45,8 +45,8 @@ func IsValid(v string) bool { // Canonical returns the canonical formatting of the semantic version v. // It fills in any missing .MINOR or .PATCH and discards build metadata. -// Two semantic versions compare equal only if their canonical formattings -// are identical strings. +// Two semantic versions compare equal only if their canonical formatting +// is an identical string. // The canonical invalid semantic version is the empty string. 
func Canonical(v string) string { p, ok := parse(v) diff --git a/vendor/golang.org/x/oauth2/deviceauth.go b/vendor/golang.org/x/oauth2/deviceauth.go index e99c92f39c..e783a94374 100644 --- a/vendor/golang.org/x/oauth2/deviceauth.go +++ b/vendor/golang.org/x/oauth2/deviceauth.go @@ -6,6 +6,7 @@ import ( "errors" "fmt" "io" + "mime" "net/http" "net/url" "strings" @@ -116,10 +117,38 @@ func retrieveDeviceAuth(ctx context.Context, c *Config, v url.Values) (*DeviceAu return nil, fmt.Errorf("oauth2: cannot auth device: %v", err) } if code := r.StatusCode; code < 200 || code > 299 { - return nil, &RetrieveError{ + retrieveError := &RetrieveError{ Response: r, Body: body, } + + content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type")) + switch content { + case "application/x-www-form-urlencoded", "text/plain": + // some endpoints return a query string + vals, err := url.ParseQuery(string(body)) + if err != nil { + return nil, retrieveError + } + retrieveError.ErrorCode = vals.Get("error") + retrieveError.ErrorDescription = vals.Get("error_description") + retrieveError.ErrorURI = vals.Get("error_uri") + default: + var tj struct { + // https://datatracker.ietf.org/doc/html/rfc6749#section-5.2 + ErrorCode string `json:"error"` + ErrorDescription string `json:"error_description"` + ErrorURI string `json:"error_uri"` + } + if json.Unmarshal(body, &tj) != nil { + return nil, retrieveError + } + retrieveError.ErrorCode = tj.ErrorCode + retrieveError.ErrorDescription = tj.ErrorDescription + retrieveError.ErrorURI = tj.ErrorURI + } + + return nil, retrieveError } da := &DeviceAuthResponse{} diff --git a/vendor/golang.org/x/oauth2/google/google.go b/vendor/golang.org/x/oauth2/google/google.go index e2eb9c9272..7d1fdd31d3 100644 --- a/vendor/golang.org/x/oauth2/google/google.go +++ b/vendor/golang.org/x/oauth2/google/google.go @@ -252,7 +252,7 @@ func (f *credentialsFile) tokenSource(ctx context.Context, params CredentialsPar // Further information about retrieving access tokens from the GCE metadata // server can be found at https://cloud.google.com/compute/docs/authentication. func ComputeTokenSource(account string, scope ...string) oauth2.TokenSource { - // refresh 3 minutes and 45 seconds early. The shortest MDS cache is currently 4 minutes, so any + // Refresh 3 minutes and 45 seconds early. The shortest MDS cache is currently 4 minutes, so any // refreshes earlier are a waste of compute. earlyExpirySecs := 225 * time.Second return computeTokenSource(account, earlyExpirySecs, scope...) diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go index 3e3b630695..5c527d31fd 100644 --- a/vendor/golang.org/x/oauth2/oauth2.go +++ b/vendor/golang.org/x/oauth2/oauth2.go @@ -98,7 +98,7 @@ const ( // in the POST body as application/x-www-form-urlencoded parameters. AuthStyleInParams AuthStyle = 1 - // AuthStyleInHeader sends the client_id and client_password + // AuthStyleInHeader sends the client_id and client_secret // using HTTP Basic Authorization. This is an optional style // described in the OAuth2 RFC 6749 section 2.3.1. 
AuthStyleInHeader AuthStyle = 2 diff --git a/vendor/golang.org/x/oauth2/pkce.go b/vendor/golang.org/x/oauth2/pkce.go index cea8374d51..f99384f0f5 100644 --- a/vendor/golang.org/x/oauth2/pkce.go +++ b/vendor/golang.org/x/oauth2/pkce.go @@ -51,7 +51,7 @@ func S256ChallengeFromVerifier(verifier string) string { return base64.RawURLEncoding.EncodeToString(sha[:]) } -// S256ChallengeOption derives a PKCE code challenge derived from verifier with +// S256ChallengeOption derives a PKCE code challenge from the verifier with // method S256. It should be passed to [Config.AuthCodeURL] or [Config.DeviceAuth] // only. func S256ChallengeOption(verifier string) AuthCodeOption { diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go index 239ec32962..e995eebb5e 100644 --- a/vendor/golang.org/x/oauth2/token.go +++ b/vendor/golang.org/x/oauth2/token.go @@ -103,7 +103,7 @@ func (t *Token) WithExtra(extra any) *Token { } // Extra returns an extra field. -// Extra fields are key-value pairs returned by the server as a +// Extra fields are key-value pairs returned by the server as // part of the token retrieval response. func (t *Token) Extra(key string) any { if raw, ok := t.raw.(map[string]any); ok { diff --git a/vendor/golang.org/x/oauth2/transport.go b/vendor/golang.org/x/oauth2/transport.go index 8bbebbac9e..9922ec3316 100644 --- a/vendor/golang.org/x/oauth2/transport.go +++ b/vendor/golang.org/x/oauth2/transport.go @@ -58,7 +58,7 @@ func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { var cancelOnce sync.Once // CancelRequest does nothing. It used to be a legacy cancellation mechanism -// but now only it only logs on first use to warn that it's deprecated. +// but now only logs on first use to warn that it's deprecated. // // Deprecated: use contexts for cancellation instead. 
func (t *Transport) CancelRequest(req *http.Request) { diff --git a/vendor/modules.txt b/vendor/modules.txt index 80c96f69d2..f89d8ada78 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -492,8 +492,8 @@ github.com/cncf/xds/go/xds/type/v3 # github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be ## explicit github.com/common-nighthawk/go-figure -# github.com/containerd/stargz-snapshotter/estargz v0.16.3 -## explicit; go 1.22.0 +# github.com/containerd/stargz-snapshotter/estargz v0.18.1 +## explicit; go 1.24.0 github.com/containerd/stargz-snapshotter/estargz github.com/containerd/stargz-snapshotter/estargz/errorutil # github.com/coreos/go-oidc/v3 v3.14.1 @@ -850,8 +850,8 @@ github.com/google/go-cmp/cmp/internal/diff github.com/google/go-cmp/cmp/internal/flags github.com/google/go-cmp/cmp/internal/function github.com/google/go-cmp/cmp/internal/value -# github.com/google/go-containerregistry v0.20.6 -## explicit; go 1.24 +# github.com/google/go-containerregistry v0.20.7 +## explicit; go 1.24.0 github.com/google/go-containerregistry/internal/and github.com/google/go-containerregistry/internal/compression github.com/google/go-containerregistry/internal/estargz @@ -1111,8 +1111,8 @@ github.com/kballard/go-shellquote # github.com/kelseyhightower/envconfig v1.4.0 ## explicit github.com/kelseyhightower/envconfig -# github.com/klauspost/compress v1.18.0 -## explicit; go 1.22 +# github.com/klauspost/compress v1.18.1 +## explicit; go 1.23 github.com/klauspost/compress github.com/klauspost/compress/flate github.com/klauspost/compress/fse @@ -1784,7 +1784,7 @@ github.com/transparency-dev/tessera/internal/witness github.com/transparency-dev/tessera/storage/gcp github.com/transparency-dev/tessera/storage/gcp/antispam github.com/transparency-dev/tessera/storage/internal -# github.com/vbatts/tar-split v0.12.1 +# github.com/vbatts/tar-split v0.12.2 ## explicit; go 1.17 github.com/vbatts/tar-split/archive/tar # github.com/x448/float16 v0.8.4 @@ -2047,7 +2047,7 @@ golang.org/x/crypto/ssh/terminal ## explicit; go 1.23.0 golang.org/x/exp/maps golang.org/x/exp/slices -# golang.org/x/mod v0.29.0 +# golang.org/x/mod v0.30.0 ## explicit; go 1.24.0 golang.org/x/mod/semver golang.org/x/mod/sumdb/dirhash @@ -2064,7 +2064,7 @@ golang.org/x/net/internal/socks golang.org/x/net/internal/timeseries golang.org/x/net/proxy golang.org/x/net/trace -# golang.org/x/oauth2 v0.32.0 +# golang.org/x/oauth2 v0.33.0 ## explicit; go 1.24.0 golang.org/x/oauth2 golang.org/x/oauth2/authhandler
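As a closing note on the new simple_go124.go file vendored above: zstd.EncodeTo and zstd.DecodeTo lazily construct one shared encoder and one shared decoder, held behind weak pointers so they can be garbage-collected when idle, and both append their output to dst. A minimal usage sketch (requires the file's go1.24 build tag; error handling shortened to a panic):

package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	payload := []byte("hello, zstd")

	// EncodeTo compresses src and appends the frame to dst (nil here).
	frame := zstd.EncodeTo(nil, payload)

	// DecodeTo appends the decompressed bytes to dst; the shared decoder
	// caps the decoded size at 1 GiB, per the function's documentation.
	plain, err := zstd.DecodeTo(nil, frame)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(plain)) // hello, zstd
}

Because both helpers append to their first argument, passing a reused buffer (for example frame[:0] on the encode side) amortizes allocations across calls.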